/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>

            .text

;
;            0        0        1        2        3        4        4        5      6
;            0        8        6        4        2        0        8        6      3
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx|  - EA
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |//////BB|BBBBBBBB|BBBB////|  - SID - base
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |////////|11111111|111111//|  - SID - copy 1
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |////////|//222222|22222222|  - SID - copy 2
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |//////33|33333333|33//////|  - SID - copy 3 - not needed
;           +--------+--------+--------+    for 65 bit VPN
;
;            0        0        1        2        3        4        4  5     5
;            0        8        6        4        2        0        8  1     5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////|  - SID Hash - this is all
;           +--------+--------+--------+--------+--------+--------+--------+    SID copies ORed
;
;            0        0        1        2        3        4        4  5     5
;            0        8        6        4        2        0        8  1     5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////|  - Shifted high order EA
;           +--------+--------+--------+--------+--------+--------+--------+    left shifted "segment"
;                                                                               part of EA to make
;                                                                               room for SID base
;
;
;            0        0        1        2        3        4        4  5     5
;            0        8        6        4        2        0        8  1     5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|  - VSID - SID Hash XORed
;           +--------+--------+--------+--------+--------+--------+--------+    with shifted EA
;
;            0        0        1        2        3        4        4        5        6        7      7
;            0        8        6        4        2        0        8        6        4        2      9
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx|  - VPN
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;

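;           As a rough C restatement of the pictures above (an illustrative
;           sketch only, not part of the build; the names are invented here,
;           the widths and shifts are the ones shown in the diagrams):
;
;               /* OR the shifted copies of the space ID together... */
;               uint64_t sid_hash = sid_base | sid_copy1 | sid_copy2;
;               /* ...then XOR in the left-shifted "segment" part of the EA */
;               uint64_t vsid = sid_hash ^ ea_segment_shifted;
;               /* The VPN is the VSID concatenated with the page/byte bits of the EA */
;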
/*
 *	addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 *	Maps a page or block into a pmap
 *
 *	Returns 0 if add worked or the vaddr of the first overlap if not
 *
 *	Make mapping - not block or I/O - note: this is low-level, upper should remove
 *	duplicates (the non-block path is sketched in C just after this comment)
 *
 *	1) bump mapping busy count
 *	2) lock pmap share
 *	3) find mapping full path - finds all possible list previous elements
 *	4) upgrade pmap to exclusive
 *	5) add mapping to search list
 *	6) find physent
 *	7) lock physent
 *	8) add to physent
 *	9) unlock physent
 *	10) unlock pmap
 *	11) drop mapping busy count
 *
 *
 *	Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
 *
 *	1) bump mapping busy count
 *	2) lock pmap share
 *	3) find mapping full path - finds all possible list previous elements
 *	4) upgrade pmap to exclusive
 *	5) add mapping to search list
 *	6) unlock pmap
 *	7) drop mapping busy count
 *
 */
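/*
 *	As an illustrative C sketch of the non-block path above (the names are
 *	invented stand-ins for the assembly routines that follow; this is not
 *	compiled code):
 *
 *		addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) {
 *			mapping_bump_busy(mp);				// 1
 *			sxlk_shared(&pmap->pmapSXlk);			// 2
 *			if (map_search_full(pmap, mp))			// 3
 *				return overlap_vaddr;			// caller resolves collisions
 *			sxlk_promote_or_convert(&pmap->pmapSXlk);	// 4 (re-search if converted)
 *			map_insert(pmap, mp);				// 5
 *			physent *pp = phys_find_lock(mp);		// 6, 7
 *			phys_chain(pp, mp);				// 8
 *			phys_unlock(pp);				// 9
 *			sxlk_unlock(&pmap->pmapSXlk);			// 10
 *			mapping_drop_busy(mp);				// 11
 *			return 0;					// success
 *		}
 */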

            .align  5
            .globl  EXT(hw_add_map)

LEXT(hw_add_map)

            stwu    r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1)   ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r17,FM_ARG0+0x00(r1)        ; Save a register
            stw     r18,FM_ARG0+0x04(r1)        ; Save a register
            stw     r19,FM_ARG0+0x08(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r21,FM_ARG0+0x10(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit cr6
            stw     r22,FM_ARG0+0x14(r1)        ; Save a register
            stw     r23,FM_ARG0+0x18(r1)        ; Save a register
            stw     r24,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r25,FM_ARG0+0x20(r1)        ; Save a register
            stw     r26,FM_ARG0+0x24(r1)        ; Save a register
            stw     r27,FM_ARG0+0x28(r1)        ; Save a register
            stw     r28,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r29,FM_ARG0+0x30(r1)        ; Save a register
            stw     r30,FM_ARG0+0x34(r1)        ; Save a register
            stw     r31,FM_ARG0+0x38(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

#if DEBUG
            lwz     r11,pmapFlags(r3)           ; Get pmap's flags
            rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
            bne     hamPanic                    ; Call not valid for guest shadow assist pmap
#endif

            rlwinm  r11,r4,0,0,19               ; Round down to get mapping block address
            mr      r28,r3                      ; Save the pmap
            mr      r31,r4                      ; Save the mapping
            bt++    pf64Bitb,hamSF1             ; skip if 64-bit (only they take the hint)
            lwz     r20,pmapvr+4(r3)            ; Get conversion mask for pmap
            lwz     r21,mbvrswap+4(r11)         ; Get conversion mask for mapping

            b       hamSF1x                     ; Done...

hamSF1:     ld      r20,pmapvr(r3)              ; Get conversion mask for pmap
            ld      r21,mbvrswap(r11)           ; Get conversion mask for mapping

hamSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r17,r11                     ; Save the MSR
            xor     r28,r28,r20                 ; Convert the pmap to physical addressing
            xor     r31,r31,r21                 ; Convert the mapping to physical addressing

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            lwz     r24,mpFlags(r31)            ; Pick up the flags
            bne--   hamBadLock                  ; Nope...

            li      r21,0                       ; Remember that we have the shared lock

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.
;

hamRescan:  lwz     r4,mpVAddr(r31)             ; Get the new vaddr top half
            lwz     r5,mpVAddr+4(r31)           ; Get the new vaddr bottom half
            mr      r3,r28                      ; Pass in pmap to search
            lhz     r23,mpBSize(r31)            ; Get the block size for later
            mr      r29,r4                      ; Save top half of vaddr for later
            mr      r30,r5                      ; Save bottom half of vaddr for later

            bl      EXT(mapSearchFull)          ; Go see if we can find it

            li      r22,lo16(0x800C)            ; Get 0xFFFF800C
            rlwinm  r0,r24,mpBSub+1,31,31       ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r23,r23,1                   ; Get actual length
            rlwnm   r22,r22,r0,27,31            ; Rotate to get 12 or 25
            lis     r0,0x8000                   ; Get 0xFFFFFFFF80000000
            slw     r9,r23,r22                  ; Isolate the low part
            rlwnm   r22,r23,r22,22,31           ; Extract the high order
            addic   r23,r9,-4096                ; Get the length to the last page
            add     r0,r0,r0                    ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
            addme   r22,r22                     ; Do high order as well...
            mr.     r3,r3                       ; Did we find a mapping here?
            or      r0,r30,r0                   ; Fill high word of 64-bit with 1s so we will properly carry
            bne--   hamOverlay                  ; We found a mapping, this is no good, can not double map...

            addc    r9,r0,r23                   ; Add size to get last page in new range
            or.     r0,r4,r5                    ; Are we beyond the end?
            adde    r8,r29,r22                  ; Add the rest of the length on
            rlwinm  r9,r9,0,0,31                ; Clean top half of sum
            beq++   hamFits                     ; We are at the end...

            cmplw   cr1,r9,r5                   ; Is the bottom part of our end less?
            cmplw   r8,r4                       ; Is our end before the next (top part)?
            crand   cr0_eq,cr0_eq,cr1_lt        ; Is the second half less and the first half equal?
            cror    cr0_eq,cr0_eq,cr0_lt        ; Or is the top half less?

            bf--    cr0_eq,hamOverlay           ; No, we do not fit, there is an overlay...

;
;           Here we try to promote to an exclusive lock.  This will fail if someone else
;           has it shared.
;
hamFits:    mr.     r21,r21                     ; Do we already have the exclusive lock?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock

            bne--   hamGotX                     ; We already have the exclusive...

            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hamGotX                     ; Yeah...

;
;           Since we could not promote our lock, we need to convert to it.
;           That means that we drop the shared lock and wait to get it
;           exclusive.  Since we release the lock, we need to do the look up
;           again.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hamBadLock                  ; Nope, we must have timed out...

            li      r21,1                       ; Remember that we have the exclusive lock
            b       hamRescan                   ; Go look again...

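;           The promote-else-convert pattern above, roughly, in C (an
;           illustrative sketch; the names are invented):
;
;               if (!have_exclusive) {
;                   if (sxlk_promote(&pmap->pmapSXlk) != 0) {
;                       /* Promote failed: drop the shared lock, wait for
;                          exclusive, and redo the search since the list
;                          may have changed while we held nothing. */
;                       if (sxlk_convert(&pmap->pmapSXlk) != 0)
;                           return mapRtBadLk;      /* timed out */
;                       have_exclusive = 1;
;                       goto rescan;
;                   }
;               }
;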
            .align  5

hamGotX:    mr      r3,r28                      ; Get the pmap to insert into
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapInsert)              ; Insert the mapping into the list

            rlwinm  r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
            lhz     r8,mpSpace(r31)             ; Get the address space
            lwz     r11,lgpPcfg(r11)            ; Get the page config
            mfsdr1  r7                          ; Get the hash table base/bounds

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            lwz     r12,pmapResidentMax(r28)    ; r12 = pmap->stats.resident_max
            addi    r4,r4,1                     ; Bump up the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            cmplw   r12,r4                      ; if pmap->stats.resident_max >= pmap->stats.resident_count
            bge+    hamSkipMax                  ; goto hamSkipMax
            stw     r4,pmapResidentMax(r28)     ; pmap->stats.resident_max = pmap->stats.resident_count

hamSkipMax: andi.   r0,r24,mpType               ; Is this a normal mapping?

            rlwimi  r8,r8,14,4,17               ; Double address space
            rlwinm  r9,r30,0,4,31               ; Clear segment
            rlwinm  r10,r30,18,14,17            ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
            rlwimi  r8,r8,28,0,3                ; Get the last nybble of the hash
            rlwimi  r10,r29,18,0,13             ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
            rlwinm  r7,r7,0,16,31               ; Isolate length mask (or count)
            srw     r9,r9,r11                   ; Isolate just the page index
            xor     r10,r10,r8                  ; Calculate the low 32 bits of the VSID

            xor     r9,r9,r10                   ; Get the hash to the PTEG

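;           Roughly, in C (an illustrative sketch): this is the standard
;           PowerPC primary hash, the low-order VSID bits XORed with the
;           page index and wrapped to the hash table size:
;
;               hash = (vsid_low ^ page_index) & table_mask;
;               pteg = hash_table_base + (hash * pteg_size);
;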
            bne--   hamDoneNP                   ; Not a normal mapping, therefore, no physent...

            bl      mapPhysFindLock             ; Go find and lock the physent

            bt++    pf64Bitb,ham64              ; This is 64-bit...

            lwz     r11,ppLink+4(r3)            ; Get the alias chain pointer
            rlwinm  r7,r7,16,0,15               ; Get the PTEG wrap size
            slwi    r9,r9,6                     ; Make PTEG offset
            ori     r7,r7,0xFFC0                ; Stick in the bottom part
            rlwinm  r12,r11,0,~ppFlags          ; Clean it up
            and     r9,r9,r7                    ; Wrap offset into table
            mr      r4,r31                      ; Set the link to install
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            stw     r12,mpAlias+4(r31)          ; Move to the mapping
            bl      mapPhyCSet32                ; Install the link
            b       hamDone                     ; Go finish up...

            .align  5

ham64:      li      r0,ppLFAmask                ; Get mask to clean up alias pointer
            subfic  r7,r7,46                    ; Get number of leading zeros
            eqv     r4,r4,r4                    ; Get all ones
            ld      r11,ppLink(r3)              ; Get the alias chain pointer
            rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
            srd     r4,r4,r7                    ; Get the wrap mask
            sldi    r9,r9,7                     ; Change hash to PTEG offset
            andc    r11,r11,r0                  ; Clean out the lock and flags
            and     r9,r9,r4                    ; Wrap to PTEG
            mr      r4,r31
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            std     r11,mpAlias(r31)            ; Set the alias pointer in the mapping

            bl      mapPhyCSet64                ; Install the link

hamDone:    bl      mapPhysUnlock               ; Unlock the physent chain

hamDoneNP:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            mr      r3,r31                      ; Get the mapping pointer
            bl      mapDropBusy                 ; Drop the busy count

            li      r3,0                        ; Set successful return
            li      r4,0                        ; Set successful return

hamReturn:  bt++    pf64Bitb,hamR64             ; Yes...

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hamReturnC                  ; Join common...

hamR64:     mtmsrd  r17                         ; Restore enables/translation/etc.
            isync

hamReturnC: lwz     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Get the return
            lwz     r17,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r19,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x0C(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r21,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r22,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack

            blr                                 ; Leave...


            .align  5

hamOverlay: lwz     r22,mpFlags(r3)             ; Get the overlay flags
            li      r0,mpC|mpR                  ; Get a mask to turn off RC bits
            lwz     r23,mpFlags(r31)            ; Get the requested flags
            lwz     r20,mpVAddr(r3)             ; Get the overlay address
            lwz     r8,mpVAddr(r31)             ; Get the requested address
            lwz     r21,mpVAddr+4(r3)           ; Get the overlay address
            lwz     r9,mpVAddr+4(r31)           ; Get the requested address
            lhz     r10,mpBSize(r3)             ; Get the overlay length
            lhz     r11,mpBSize(r31)            ; Get the requested length
            lwz     r24,mpPAddr(r3)             ; Get the overlay physical address
            lwz     r25,mpPAddr(r31)            ; Get the requested physical address
            andc    r21,r21,r0                  ; Clear RC bits
            andc    r9,r9,r0                    ; Clear RC bits

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            rlwinm. r0,r22,0,mpRIPb,mpRIPb      ; Are we in the process of removing this one?
            mr      r3,r20                      ; Save the top of the colliding address
            rlwinm  r4,r21,0,0,19               ; Save the bottom of the colliding address

            bne++   hamRemv                     ; Removing, go say so so we can help...

            cmplw   r20,r8                      ; High part of vaddr the same?
            cmplw   cr1,r21,r9                  ; Low part?
            crand   cr5_eq,cr0_eq,cr1_eq        ; Remember if same

            cmplw   r10,r11                     ; Size the same?
            cmplw   cr1,r24,r25                 ; Physical address?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Remember
            crand   cr5_eq,cr5_eq,cr1_eq        ; Remember if same

            xor     r23,r23,r22                 ; Compare mapping flag words
            andi.   r23,r23,mpType|mpPerm       ; Are mapping types and attributes the same?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Merge in final check
            bf--    cr5_eq,hamSmash             ; This is not the same, so we return a smash...

            ori     r4,r4,mapRtMapDup           ; Set duplicate
            b       hamReturn                   ; And leave...

hamRemv:    ori     r4,r4,mapRtRemove           ; We are in the process of removing the collision
            b       hamReturn                   ; Come back yall...

hamSmash:   ori     r4,r4,mapRtSmash            ; Tell caller that it has some clean up to do
            b       hamReturn                   ; Join common epilog code

            .align  5

hamBadLock: li      r3,0                        ; Set lock time out error code
            li      r4,mapRtBadLk               ; Set lock time out error code
            b       hamReturn                   ; Leave....

hamPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc


/*
 *	mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *	Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *	a 64-bit quantity, it is a long long so it is in R4 and R5.
 *
 *	We return the virtual address of the removed mapping as a
 *	return value in R3.
 *
 *	Note that this is designed to be called from 32-bit mode with a stack.
 *
 *	We disable translation and all interruptions here.  This keeps us
 *	from having to worry about a deadlock due to having anything locked
 *	and needing it to process a fault.
 *
 *	Note that this must be done with both interruptions off and VM off
 *
 *	Remove mapping via pmap, regular page, no pte
 *	(the "with pte" variant is sketched in C just after this comment)
 *
 *	1) lock pmap share
 *	2) find mapping full path - finds all possible list previous elements
 *	3) upgrade pmap to exclusive
 *	4) bump mapping busy count
 *	5) remove mapping from search list
 *	6) unlock pmap
 *	7) lock physent
 *	8) remove from physent
 *	9) unlock physent
 *	10) drop mapping busy count
 *	11) drain mapping busy count
 *
 *
 *	Remove mapping via pmap, regular page, with pte
 *
 *	1) lock pmap share
 *	2) find mapping full path - finds all possible list previous elements
 *	3) upgrade lock to exclusive
 *	4) bump mapping busy count
 *	5) lock PTEG
 *	6) invalidate pte and tlbie
 *	7) atomic merge rc into physent
 *	8) unlock PTEG
 *	9) remove mapping from search list
 *	10) unlock pmap
 *	11) lock physent
 *	12) remove from physent
 *	13) unlock physent
 *	14) drop mapping busy count
 *	15) drain mapping busy count
 *
 *
 *	Remove mapping via pmap, I/O or block
 *
 *	1) lock pmap share
 *	2) find mapping full path - finds all possible list previous elements
 *	3) upgrade lock to exclusive
 *	4) bump mapping busy count
 *	5) mark remove-in-progress
 *	6) check and bump remove chunk cursor if needed
 *	7) unlock pmap
 *	8) if something to invalidate, go to step 11
 *	9) drop busy
 *	10) return with mapRtRemove to force higher level to call again
 *	11) lock PTEG
 *	12) invalidate ptes, no tlbie
 *	13) unlock PTEG
 *	14) repeat 11 - 13 for all pages in chunk
 *	15) if not final chunk, go to step 9
 *	16) invalidate tlb entries for the whole block map but no more than the full tlb
 *	17) lock pmap share
 *	18) find mapping full path - finds all possible list previous elements
 *	19) upgrade lock to exclusive
 *	20) remove mapping from search list
 *	21) drop mapping busy count
 *	22) drain mapping busy count
 *
 */
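/*
 *	As an illustrative C sketch of the "regular page, with pte" path above
 *	(invented names standing in for the assembly routines; not compiled):
 *
 *		mapping *hw_rem_map(pmap_t *pmap, addr64_t vaddr, addr64_t *next) {
 *			sxlk_shared(&pmap->pmapSXlk);			// 1
 *			mapping *mp = map_search_full(pmap, vaddr);	// 2
 *			sxlk_promote_or_convert(&pmap->pmapSXlk);	// 3 (re-search if converted)
 *			mapping_bump_busy(mp);				// 4
 *			pteg_lock(mp);					// 5
 *			rc = pte_invalidate_and_tlbie(mp);		// 6
 *			phys_merge_rc(mp, rc);				// 7
 *			pteg_unlock(mp);				// 8
 *			map_remove(pmap, mp);				// 9
 *			sxlk_unlock(&pmap->pmapSXlk);			// 10
 *			phys_lock(mp);					// 11
 *			phys_unchain(mp);				// 12
 *			phys_unlock(mp);				// 13
 *			mapping_drop_busy(mp);				// 14
 *			mapping_drain_busy(mp);				// 15
 *			return mp;
 *		}
 */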

            .align  5
            .globl  EXT(hw_rem_map)

LEXT(hw_rem_map)

;
;           NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;           THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4
            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)   ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit cr6
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

#if DEBUG
            lwz     r11,pmapFlags(r3)           ; Get pmap's flags
            rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
            bne     hrmPanic                    ; Call not valid for guest shadow assist pmap
#endif

            bt++    pf64Bitb,hrmSF1             ; skip if 64-bit (only they take the hint)
            lwz     r9,pmapvr+4(r3)             ; Get conversion mask
            b       hrmSF1x                     ; Done...

hrmSF1:     ld      r9,pmapvr(r3)               ; Get conversion mask

hrmSF1x:
            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            xor     r28,r3,r9                   ; Convert the pmap to physical addressing

;
;           Here is where we join in from the hw_purge_* routines
;

hrmJoin:    lwz     r3,pmapFlags(r28)           ; Get pmap's flags
            mfsprg  r19,2                       ; Get feature flags again (for alternate entries)

            mr      r17,r11                     ; Save the MSR
            mr      r29,r4                      ; Top half of vaddr
            mr      r30,r5                      ; Bottom half of vaddr

            rlwinm. r3,r3,0,pmapVMgsaa          ; Is guest shadow assist active?
            bne--   hrmGuest                    ; Yes, handle specially

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hrmBadLock                  ; Nope...

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.  Note: we get back mpFlags in R7.
;

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            andi.   r0,r7,mpPerm                ; Mapping marked permanent?
            crmove  cr5_eq,cr0_eq               ; Remember permanent marking
            mr      r20,r7                      ; Remember mpFlags
            mr.     r31,r3                      ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Nope, not found...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...
;
;           Here we try to promote to an exclusive lock.  This will fail if someone else
;           has it shared.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hrmGotX                     ; Yeah...

;
;           Since we could not promote our lock, we need to convert to it.
;           That means that we drop the shared lock and wait to get it
;           exclusive.  Since we release the lock, we need to do the look up
;           again.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmBadLock                  ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            andi.   r0,r7,mpPerm                ; Mapping marked permanent?
            crmove  cr5_eq,cr0_eq               ; Remember permanent marking
            mr.     r31,r3                      ; Did we lose it when we converted?
            mr      r20,r7                      ; Remember mpFlags
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Yeah, we did, someone tossed it for us...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...

;
;           We have an exclusive lock on the mapping chain.  And we
;           also have the busy count bumped in the mapping so it can
;           not vanish on us.
;

hrmGotX:    mr      r3,r31                      ; Get the mapping
            bl      mapBumpBusy                 ; Bump up the busy count

;
;           Invalidate any PTEs associated with this
;           mapping (more than one if a block) and accumulate the reference
;           and change bits.
;
;           Here is also where we need to split 32- and 64-bit processing
;

            lwz     r21,mpPte(r31)              ; Grab the offset to the PTE
            rlwinm  r23,r29,0,1,0               ; Copy high order vaddr to high if 64-bit machine
            mfsdr1  r29                         ; Get the hash table base and size

            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr5,r0,mpBlock              ; Remember whether this is a block mapping
            cmplwi  r0,mpMinSpecial             ; cr0_lt <- not a special mapping type

            rlwinm  r0,r21,0,mpHValidb,mpHValidb    ; See if we actually have a PTE
            ori     r2,r2,0xFFFF                ; Get mask to clean out hash table base (works for both 32- and 64-bit)
            cmpwi   cr1,r0,0                    ; Have we made a PTE for this yet?
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            crorc   cr0_eq,cr1_eq,cr0_lt        ; No need to look at PTE if none or a special mapping
            rlwimi  r23,r30,0,0,31              ; Insert low under high part of address
            andc    r29,r29,r2                  ; Clean up hash table base
            li      r22,0                       ; Clear this on out (also sets RC to 0 if we bail)
            mr      r30,r23                     ; Move the now merged vaddr to the correct register
            add     r26,r29,r21                 ; Point to the PTEG slot

            bt++    pf64Bitb,hrmSplit64         ; Go do 64-bit version...

            rlwinm  r9,r21,28,4,29              ; Convert PTEG to PCA entry
            beq-    cr5,hrmBlock32              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt-     cr0_eq,hrmPysDQ32           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG (Note: we need to save R6 to set PCA)

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            lwz     r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb    ; See if we actually have a PTE
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            rlwinm  r5,r5,0,1,31                ; Turn off valid bit in PTE
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
            beq-    hrmUlckPCA32                ; Pte is gone, no need to invalidate...

            stw     r5,0(r26)                   ; Invalidate the PTE

            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb32:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne-    hrmPtlb32                   ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne-    hrmPtlb32                   ; We was beat...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb    ; Can this processor do SMP?

            tlbie   r30                         ; Invalidate all corresponding TLB entries

            beq-    hrmNTlbs                    ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Make sure of it all

hrmNTlbs:   li      r0,0                        ; Clear this
            rlwinm  r2,r21,29,29,31             ; Get slot number (8 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            lis     r0,0x8000                   ; Get bit for slot 0
            eieio                               ; Make sure those RC bits have been stashed in PTE

            srw     r0,r0,r2                    ; Get the allocation hash mask
            lwz     r22,4(r26)                  ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Show that this slot is free

hrmUlckPCA32:
            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock the PTEG

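;
;           The PTE takedown just performed, roughly, in C (an illustrative
;           sketch; tlbie/tlbsync stand for the actual instructions and the
;           lock is the global tlbieLock):
;
;               pte->valid = 0;             /* invalidate in the hash table    */
;               sync();                     /* PTE update visible before tlbie */
;               spin_lock(&tlbieLock);      /* one tlbie stream at a time      */
;               tlbie(vaddr);               /* toss matching TLB entries       */
;               if (smp_capable) { eieio(); tlbsync(); sync(); }
;               spin_unlock(&tlbieLock);
;               rc = pte->rc;               /* harvest reference/change bits   */
;               pca_unlock(pteg);
;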
;
;           Now, it is time to remove the mapping and unlock the chain.
;           But first, we need to make sure no one else is using this
;           mapping so we drain the busy now
;

hrmPysDQ32: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr1,r0,mpMinSpecial         ; cr1_lt <- not a special mapping type
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bf--    cr1_lt,hrmRetn32            ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            lwz     r9,ppLink+4(r3)             ; Get first mapping

            mr      r4,r22                      ; Get the RC bits we just got
            bl      mapPhysMerge                ; Go merge the RC bits

            rlwinm  r9,r9,0,~ppFlags            ; Clear the flags from the mapping pointer

            cmplw   r9,r31                      ; Are we the first on the list?
            bne-    hrmNot1st                   ; Nope...

            li      r9,0                        ; Get a 0
            lwz     r4,mpAlias+4(r31)           ; Get our new forward pointer
            stw     r9,mpAlias+4(r31)           ; Make sure we are off the chain
            bl      mapPhyCSet32                ; Go set the physent link and preserve flags

            b       hrmPhyDQd                   ; Join up and unlock it all...

            .align  5

hrmPerm:    li      r8,-4096                    ; Get the value we need to round down to a page
            and     r8,r8,r31                   ; Get back to a page
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            xor     r3,r31,r8                   ; Flip mapping address to virtual
            ori     r3,r3,mapRtPerm             ; Set permanent mapping error
            b       hrmErRtn

hrmBadLock: li      r3,mapRtBadLk               ; Set bad lock
            b       hrmErRtn

hrmEndInSight:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

hrmDoneChunk:
            mr      r3,r31                      ; Point to the mapping
            bl      mapDropBusy                 ; Drop the busy here since we need to come back
            li      r3,mapRtRemove              ; Say we are still removing this
            b       hrmErRtn

            .align  5

hrmNotFound:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            li      r3,mapRtNotFnd              ; No mapping found

hrmErRtn:   bt++    pf64Bitb,hrmSF1z            ; skip if 64-bit (only they take the hint)

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...

hrmSF1z:    mtmsrd  r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...


            .align  5

hrmNot1st:  mr.     r8,r9                       ; Remember and test current node
            beq-    hrmPhyDQd                   ; Could not find our node, someone must have unmapped us...
            lwz     r9,mpAlias+4(r9)            ; Chain to the next
            cmplw   r9,r31                      ; Is this us?
            bne-    hrmNot1st                   ; Not us...

            lwz     r9,mpAlias+4(r9)            ; Get our forward pointer
            stw     r9,mpAlias+4(r8)            ; Unchain us

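;           This is a plain singly-linked-list removal over the physent
;           alias chain, roughly (an illustrative C sketch):
;
;               for (prev = first; prev != NULL; prev = prev->alias) {
;                   if (prev->alias == mp) {            /* found our predecessor */
;                       prev->alias = mp->alias;        /* unchain us */
;                       break;
;                   }
;               }
;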
            nop                                 ; For alignment

hrmPhyDQd:  bl      mapPhysUnlock               ; Unlock the physent chain

hrmRetn32:  rlwinm  r8,r31,0,0,19               ; Find start of page
            mr      r3,r31                      ; Copy the pointer to the mapping
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap
            bl      mapDrainBusy                ; Go wait until mapping is unused

            xor     r3,r31,r8                   ; Flip mapping address to virtual

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync

hrmRetnCmn: lwz     r6,FM_ARG0+0x44(r1)         ; Get address to save next mapped vaddr
            lwz     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)   ; Restore the return
            lwz     r17,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x0C(r1)        ; Restore a register
            mr.     r6,r6                       ; Should we pass back the "next" vaddr?
            lwz     r19,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x14(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return

            rlwinm  r16,r16,0,0,19              ; Clean to a page boundary
            beq     hrmNoNextAdr                ; Do not pass back the next vaddr...
            stw     r15,0(r6)                   ; Pass back the top of the next vaddr
            stw     r16,4(r6)                   ; Pass back the bottom of the next vaddr

hrmNoNextAdr:
            lwz     r15,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r16,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r21,FM_ARG0+0x18(r1)        ; Restore a register
            rlwinm  r3,r3,0,0,31                ; Clear top of register if 64-bit
            lwz     r22,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x3C(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x40(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...

;
;           Here is where we come when all is lost.  Somehow, we failed a mapping function
;           that must work... All hope is gone.  Alas, we die.......
;

hrmPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc


;
;           Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;           in the range.  Then, if we did not finish, return a code indicating that we need to
;           be called again.  Eventually, we will finish and then, we will do a TLBIE for each
;           PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
;
;           A potential speed up is that we stop the invalidate loop once we have walked through
;           the hash table once.  This really is not worth the trouble because we need to have
;           mapped 1/2 of physical RAM in an individual block.  Way unlikely.
;
;           We should rethink this and see if we think it will be faster to check PTE and
;           only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;
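;           The chunking described above, roughly, in C (an illustrative
;           sketch; names are invented):
;
;               while (cursor <= last_page) {
;                   mp->mpBlkRemCur = cursor + mapRemChunk;     /* claim next chunk */
;                   sxlk_unlock(&pmap->pmapSXlk);               /* drop lock while invalidating */
;                   invalidate_autogen_ptes(cursor, min(pages_left, mapRemChunk));
;                   if (chunks_remain) {
;                       mapping_drop_busy(mp);
;                       return mapRtRemove;                     /* caller must call back */
;                   }
;               }
;               /* last chunk: tlbie up to one full TLB's worth, then dequeue */
;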

            .align  5

hrmBlock32: lis     r29,0xD000                  ; Get shift to 32MB bsu
            rlwinm  r24,r20,mpBSub+1+2,29,29    ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
            lhz     r25,mpBSize(r31)            ; Get the number of pages in block
            lhz     r23,mpSpace(r31)            ; Get the address space hash
            lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
            rlwnm   r29,r29,r24,28,31           ; Rotate to get 0 or 13
            addi    r25,r25,1                   ; Account for zero-based counting
            ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
            slw     r25,r25,r29                 ; Adjust for 32MB if needed
            mfsdr1  r29                         ; Get the hash table base and size
            rlwinm  r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb   ; Get high order of hash
            subi    r25,r25,1                   ; Convert back to zero-based counting
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr
            sub     r4,r25,r9                   ; Get number of pages left
            cmplw   cr1,r9,r25                  ; Have we already hit the end?
            addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
            addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
            rlwinm  r26,r29,16,7,15             ; Get the hash table size
            srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
            stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
            subi    r4,r4,mapRemChunk-1         ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi   cr7,r2,0                    ; Remember if we have finished
            slwi    r0,r9,12                    ; Make cursor into page offset
            or      r24,r24,r23                 ; Get full hash
            and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
            rlwinm  r29,r29,0,0,15              ; Isolate the hash table base
            add     r27,r27,r0                  ; Adjust vaddr to start of current chunk
            addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get min(num left, chunksize)

            bgt-    cr1,hrmEndInSight           ; Someone is already doing the last hunk...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
            bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

            rlwinm  r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb   ; Isolate the segment
            rlwinm  r30,r27,26,6,25             ; Shift vaddr to PTEG offset (and remember VADDR in R27)
            xor     r24,r24,r8                  ; Get the proper VSID
            rlwinm  r21,r27,26,10,25            ; Shift page index to PTEG offset (and remember VADDR in R27)
            ori     r26,r26,lo16(0xFFC0)        ; Stick in the rest of the length
            rlwinm  r22,r4,6,10,25              ; Shift size to PTEG offset
            rlwinm  r24,r24,6,0,25              ; Shift hash to PTEG units
            add     r22,r22,r30                 ; Get end address (in PTEG units)

hrmBInv32:  rlwinm  r23,r30,0,10,25             ; Isolate just the page index
            xor     r23,r23,r24                 ; Hash it
            and     r23,r23,r26                 ; Wrap it into the table
            rlwinm  r3,r23,28,4,29              ; Change to PCA offset
            subfic  r3,r3,-4                    ; Get the PCA entry offset
            add     r7,r3,r29                   ; Point to the PCA slot
            cmplw   cr5,r30,r22                 ; Check if we reached the end of the range
            addi    r30,r30,64                  ; bump to the next vaddr

            bl      mapLockPteg                 ; Lock the PTEG

            rlwinm. r4,r6,16,0,7                ; Position, save, and test block mappings in PCA
            add     r5,r23,r29                  ; Point to the PTEG
            li      r0,0                        ; Set an invalid PTE value
            beq+    hrmBNone32                  ; No block map PTEs in this PTEG...
            mtcrf   0x80,r4                     ; Set CRs to select PTE slots
            mtcrf   0x40,r4                     ; Set CRs to select PTE slots

            bf      0,hrmSlot0                  ; No autogen here
            stw     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0:   bf      1,hrmSlot1                  ; No autogen here
            stw     r0,0x08(r5)                 ; Invalidate PTE

hrmSlot1:   bf      2,hrmSlot2                  ; No autogen here
            stw     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot2:   bf      3,hrmSlot3                  ; No autogen here
            stw     r0,0x18(r5)                 ; Invalidate PTE

hrmSlot3:   bf      4,hrmSlot4                  ; No autogen here
            stw     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot4:   bf      5,hrmSlot5                  ; No autogen here
            stw     r0,0x28(r5)                 ; Invalidate PTE

hrmSlot5:   bf      6,hrmSlot6                  ; No autogen here
            stw     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot6:   bf      7,hrmSlot7                  ; No autogen here
            stw     r0,0x38(r5)                 ; Invalidate PTE

hrmSlot7:   rlwinm  r0,r4,16,16,23              ; Move in use to autogen
            or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
            andc    r6,r6,r0                    ; Turn off all the old autogen bits

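;           The slot dance above moves the PCA block-map bits into the CR
;           with mtcrf and tests one CR bit per PTE slot; in rough C it is
;           just (an illustrative sketch):
;
;               for (slot = 0; slot < 8; slot++)
;                   if (autogen_bits & (0x80 >> slot))
;                       pteg[slot].word0 = 0;   /* clear valid: invalidate autogen PTE */
;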
hrmBNone32: eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and set the PCA

            bne+    cr5,hrmBInv32               ; Go invalidate the next...

            bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

            mr      r3,r31                      ; Copy the pointer to the mapping
            bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

            sync                                ; Make sure memory is consistent

            subi    r5,r25,63                   ; Subtract TLB size from page count (note we are 0 based here)
            li      r6,63                       ; Assume full invalidate for now
            srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
            andc    r6,r6,r5                    ; Clear max if we have less to do
            and     r5,r25,r5                   ; Clear count if we have more than max
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr again
            li      r7,tlbieLock                ; Get the TLBIE lock
            or      r5,r5,r6                    ; Get number of TLBIEs needed

hrmBTLBlck: lwarx   r2,0,r7                     ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it locked?
            li      r2,1                        ; Get our lock value
            bne-    hrmBTLBlck                  ; It is locked, go wait...
            stwcx.  r2,0,r7                     ; Try to get it
            bne-    hrmBTLBlck                  ; We was beat...

hrmBTLBi:   addic.  r5,r5,-1                    ; See if we did them all
            tlbie   r27                         ; Invalidate it everywhere
            addi    r27,r27,0x1000              ; Up to the next page
            bge+    hrmBTLBi                    ; Make sure we have done it all...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb    ; Can this processor do SMP?
            li      r2,0                        ; Lock clear value

            sync                                ; Make sure all is quiet
            beq-    hrmBNTlbs                   ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Wait for quiet again

hrmBNTlbs:  stw     r2,tlbieLock(0)             ; Clear the tlbie lock

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne-    hrmPanic                    ; Nope...

            lwz     r4,mpVAddr(r31)             ; High order of address
            lwz     r5,mpVAddr+4(r31)           ; Low order of address
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Save this in case we need it (only promote fails)
            mr      r30,r5                      ; Save this in case we need it (only promote fails)
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            mr.     r3,r3                       ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq-    hrmPanic                    ; Nope, not found...

            cmplw   r3,r31                      ; Same mapping?
            bne-    hrmPanic                    ; Not good...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            mr      r3,r31                      ; Restore the mapping pointer
            beq+    hrmBDone1                   ; Yeah...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmPanic                    ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            mr.     r3,r3                       ; Did we lose it when we converted?
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone1:  bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            b       hrmRetn32                   ; We are all done, get out...

;
;           Here we handle the 64-bit version of hw_rem_map
;

            .align  5

hrmSplit64: rlwinm  r9,r21,27,5,29              ; Convert PTEG to PCA entry
            beq--   cr5,hrmBlock64              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt--    cr0_eq,hrmPysDQ64           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            ld      r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb    ; See if we actually have a PTE
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            sldi    r23,r5,16                   ; Shift AVPN up to EA format
//          **** Need to adjust above shift based on the page size - large pages need to shift a bit more
            rldicr  r5,r5,0,62                  ; Clear the valid bit
            rldimi  r23,r30,0,36                ; Insert the page portion of the VPN
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
            beq--   hrmUlckPCA64                ; Pte is gone, no need to invalidate...

            std     r5,0(r26)                   ; Invalidate the PTE

            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb64:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            rldicl  r23,r23,0,16                ; Clear bits 0:15 because they say to
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne--   hrmPtlb64w                  ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne--   hrmPtlb64                   ; We was beat...

            tlbie   r23                         ; Invalidate all corresponding TLB entries

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up

            ptesync                             ; Make sure of it all
            li      r0,0                        ; Clear this
            rlwinm  r2,r21,28,29,31             ; Get slot number (16 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            oris    r0,r0,0x8000                ; Assume slot 0

            srw     r0,r0,r2                    ; Get slot mask to deallocate

            lwz     r22,12(r26)                 ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Make the guy we killed free

hrmUlckPCA64:
            eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and change the PCA

hrmPysDQ64: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr1,r0,mpMinSpecial         ; cr1_lt <- not a special mapping type
            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bf--    cr1_lt,hrmRetn64            ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            li      r0,ppLFAmask                ; Get mask to clean up mapping pointer
            ld      r9,ppLink(r3)               ; Get first mapping
            rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
            mr      r4,r22                      ; Get the RC bits we just got

            bl      mapPhysMerge                ; Go merge the RC bits

            andc    r9,r9,r0                    ; Clean up the mapping pointer

            cmpld   r9,r31                      ; Are we the first on the list?
            bne--   hrmNot1st64                 ; Nope...

            li      r9,0                        ; Get a 0
            ld      r4,mpAlias(r31)             ; Get our forward pointer

            std     r9,mpAlias(r31)             ; Make sure we are off the chain
            bl      mapPhyCSet64                ; Go set the physent link and preserve flags

            b       hrmPhyDQd64                 ; Join up and unlock it all...

hrmPtlb64w: li      r5,lgKillResv               ; Point to some spare memory
            stwcx.  r5,0,r5                     ; Clear the pending reservation


hrmPtlb64x: lwz     r5,0(r9)                    ; Do a regular load to avoid taking reservation
            mr.     r5,r5                       ; is it locked?
            beq++   hrmPtlb64                   ; Nope...
            b       hrmPtlb64x                  ; Sniff some more...

            .align  5

hrmNot1st64:
            mr.     r8,r9                       ; Remember and test current node
            beq--   hrmPhyDQd64                 ; Could not find our node...
            ld      r9,mpAlias(r9)              ; Chain to the next
            cmpld   r9,r31                      ; Is this us?
            bne--   hrmNot1st64                 ; Not us...

            ld      r9,mpAlias(r9)              ; Get our forward pointer
            std     r9,mpAlias(r8)              ; Unchain us

            nop                                 ; For alignment

hrmPhyDQd64:
            bl      mapPhysUnlock               ; Unlock the physent chain

1c79356b | 1219 | |
55e303ae A |
1220 | hrmRetn64: rldicr r8,r31,0,51 ; Find start of page |
1221 | mr r3,r31 ; Copy the pointer to the mapping | |
1222 | lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap | |
1223 | bl mapDrainBusy ; Go wait until mapping is unused | |
1c79356b | 1224 | |
55e303ae | 1225 | xor r3,r31,r8 ; Flip mapping address to virtual |
d7e50217 | 1226 | |
55e303ae | 1227 | mtmsrd r17 ; Restore enables/translation/etc. |
de355530 | 1228 | isync |
55e303ae A |
1229 | |
1230 | b hrmRetnCmn ; Join the common return path... | |
1c79356b | 1231 | |

;
;       Check hrmBlock32 for comments.
;

        .align  5

hrmBlock64: lis     r29,0xD000                  ; Get shift to 32MB bsu
        rlwinm  r10,r20,mpBSub+1+2,29,29        ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
        lhz     r24,mpSpace(r31)                ; Get the address space hash
        lhz     r25,mpBSize(r31)                ; Get the number of pages in block
        lwz     r9,mpBlkRemCur(r31)             ; Get our current remove position
        rlwnm   r29,r29,r10,28,31               ; Rotate to get 0 or 13
        addi    r25,r25,1                       ; Account for zero-based counting
        ori     r0,r20,mpRIP                    ; Turn on the remove in progress flag
        slw     r25,r25,r29                     ; Adjust for 32MB if needed
        mfsdr1  r29                             ; Get the hash table base and size
        ld      r27,mpVAddr(r31)                ; Get the base vaddr
        subi    r25,r25,1                       ; Convert back to zero-based counting
        rlwinm  r5,r29,0,27,31                  ; Isolate the size
        sub     r4,r25,r9                       ; Get number of pages left
        cmplw   cr1,r9,r25                      ; Have we already hit the end?
        addi    r10,r9,mapRemChunk              ; Point to the start of the next chunk
        addi    r2,r4,-mapRemChunk              ; See if mapRemChunk or more
        stb     r0,mpFlags+3(r31)               ; Save the flags with the mpRIP bit on
        srawi   r2,r2,31                        ; We have -1 if less than mapRemChunk or 0 if equal or more
        subi    r4,r4,mapRemChunk-1             ; Back off for a running start (will be negative for more than mapRemChunk)
        cmpwi   cr7,r2,0                        ; Remember if we are doing the last chunk
        and     r4,r4,r2                        ; If more than a chunk, bring this back to 0
        srdi    r27,r27,12                      ; Change address into page index
        addi    r4,r4,mapRemChunk-1             ; Add mapRemChunk-1 to get max(num left, chunksize)
        add     r27,r27,r9                      ; Adjust vaddr to start of current chunk

        bgt--   cr1,hrmEndInSight               ; Someone is already doing the last chunk...

        la      r3,pmapSXlk(r28)                ; Point to the pmap search lock
        stw     r10,mpBlkRemCur(r31)            ; Set next chunk to do (note: this may indicate after end)
        bl      sxlkUnlock                      ; Unlock the search list while we are invalidating

        rlwimi  r24,r24,14,4,17                 ; Insert a copy of space hash
        eqv     r26,r26,r26                     ; Get all foxes here
        rldimi  r24,r24,28,8                    ; Make a couple copies up higher
        rldicr  r29,r29,0,47                    ; Isolate just the hash table base
        subfic  r5,r5,46                        ; Get number of leading zeros
        srd     r26,r26,r5                      ; Shift the size bits over
        mr      r30,r27                         ; Get start of chunk to invalidate
        rldicr  r26,r26,0,56                    ; Make length in PTEG units
        add     r22,r4,r30                      ; Get end page number

hrmBInv64:  srdi    r0,r30,2                    ; Shift page index over to form ESID
        rldicr  r0,r0,0,49                      ; Clean all but segment portion
        rlwinm  r2,r30,0,16,31                  ; Get the current page index
        xor     r0,r0,r24                       ; Form VSID
        xor     r8,r2,r0                        ; Hash the vaddr
        sldi    r8,r8,7                         ; Make into PTEG offset
        and     r23,r8,r26                      ; Wrap into the hash table
        rlwinm  r3,r23,27,5,29                  ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
        subfic  r3,r3,-4                        ; Get the PCA entry offset
        add     r7,r3,r29                       ; Point to the PCA slot

        cmplw   cr5,r30,r22                     ; Have we reached the end of the range?

        bl      mapLockPteg                     ; Lock the PTEG

        rlwinm. r4,r6,16,0,7                    ; Extract the block mappings in this here PTEG and see if there are any
        add     r5,r23,r29                      ; Point to the PTEG
        li      r0,0                            ; Set an invalid PTE value
        beq++   hrmBNone64                      ; No block map PTEs in this PTEG...
        mtcrf   0x80,r4                         ; Set CRs to select PTE slots
        mtcrf   0x40,r4                         ; Set CRs to select PTE slots


        bf      0,hrmSlot0s                     ; No autogen here
        std     r0,0x00(r5)                     ; Invalidate PTE

hrmSlot0s:  bf      1,hrmSlot1s                 ; No autogen here
        std     r0,0x10(r5)                     ; Invalidate PTE

hrmSlot1s:  bf      2,hrmSlot2s                 ; No autogen here
        std     r0,0x20(r5)                     ; Invalidate PTE

hrmSlot2s:  bf      3,hrmSlot3s                 ; No autogen here
        std     r0,0x30(r5)                     ; Invalidate PTE

hrmSlot3s:  bf      4,hrmSlot4s                 ; No autogen here
        std     r0,0x40(r5)                     ; Invalidate PTE

hrmSlot4s:  bf      5,hrmSlot5s                 ; No autogen here
        std     r0,0x50(r5)                     ; Invalidate PTE

hrmSlot5s:  bf      6,hrmSlot6s                 ; No autogen here
        std     r0,0x60(r5)                     ; Invalidate PTE

hrmSlot6s:  bf      7,hrmSlot7s                 ; No autogen here
        std     r0,0x70(r5)                     ; Invalidate PTE

hrmSlot7s:  rlwinm  r0,r4,16,16,23              ; Move in use to autogen
        or      r6,r6,r4                        ; Flip on the free bits that correspond to the autogens we cleared
        andc    r6,r6,r0                        ; Turn off all the old autogen bits

hrmBNone64: eieio                               ; Make sure all updates come first
        stw     r6,0(r7)                        ; Unlock and set the PCA

        addi    r30,r30,1                       ; Bump to the next PTEG
        bne++   cr5,hrmBInv64                   ; Go invalidate the next...

        bge+    cr7,hrmDoneChunk                ; We have not as yet done the last chunk, go tell our caller to call again...

        mr      r3,r31                          ; Copy the pointer to the mapping
        bl      mapDrainBusy                    ; Go wait until we are sure all other removers are done with this one

        sync                                    ; Make sure memory is consistent

        subi    r5,r25,255                      ; Subtract TLB size from page count (note we are 0-based here)
        li      r6,255                          ; Assume full invalidate for now
        srawi   r5,r5,31                        ; Make 0 if we need a full purge, -1 otherwise
        andc    r6,r6,r5                        ; Clear max if we have less to do
        and     r5,r25,r5                       ; Clear count if we have more than max
        sldi    r24,r24,28                      ; Get the full XOR value over to segment position
        ld      r27,mpVAddr(r31)                ; Get the base vaddr
        li      r7,tlbieLock                    ; Get the TLBIE lock
        or      r5,r5,r6                        ; Get number of TLBIEs needed

hrmBTLBlcl: lwarx   r2,0,r7                     ; Get the TLBIE lock
        mr.     r2,r2                           ; Is it locked?
        li      r2,1                            ; Get our lock value
        bne--   hrmBTLBlcm                      ; It is locked, go wait...
        stwcx.  r2,0,r7                         ; Try to get it
        bne--   hrmBTLBlcl                      ; We was beat...

hrmBTLBj:   sldi    r2,r27,maxAdrSpb            ; Move to make room for address space ID
        rldicr  r2,r2,0,35-maxAdrSpb            ; Clear out the extra
        addic.  r5,r5,-1                        ; See if we did them all
        xor     r2,r2,r24                       ; Make the VSID
        rldimi  r2,r27,0,36                     ; Insert the page portion of the VPN
        rldicl  r2,r2,0,16                      ; Clear bits 0:15 cause they say we gotta

        tlbie   r2                              ; Invalidate it everywhere
        addi    r27,r27,0x1000                  ; Up to the next page
        bge++   hrmBTLBj                        ; Make sure we have done it all...

        eieio                                   ; Make sure that the tlbie happens first
        tlbsync                                 ; Wait for everyone to catch up

        li      r2,0                            ; Lock clear value

        ptesync                                 ; Wait for quiet again

        stw     r2,tlbieLock(0)                 ; Clear the tlbie lock

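;
;       The sequence above is the standard PowerPC TLB shootdown protocol: take
;       the global tlbie lock, issue one tlbie per 4K page (capped at 256, the
;       TLB size), then fence before releasing the lock.  A rough C-level sketch
;       (hypothetical helper names, not the kernel's actual interface):
;
;           while (!lock_try(&tlbieLock))       /* only one CPU may tlbie at a time */
;               continue;
;           for (va = base; va < base + len; va += 4096)
;               tlbie(vsid_hash ^ va);          /* broadcast invalidate */
;           eieio();                            /* order tlbies before tlbsync */
;           tlbsync();                          /* wait for other CPUs to comply */
;           ptesync();                          /* ensure completion */
;           unlock(&tlbieLock);
;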
        la      r3,pmapSXlk(r28)                ; Point to the pmap search lock
        bl      sxlkShared                      ; Go get a shared lock on the mapping lists
        mr.     r3,r3                           ; Did we get the lock?
        bne-    hrmPanic                        ; Nope...

        lwz     r4,mpVAddr(r31)                 ; High order of address
        lwz     r5,mpVAddr+4(r31)               ; Low order of address
        mr      r3,r28                          ; Pass in pmap to search
        mr      r29,r4                          ; Save this in case we need it (only if promote fails)
        mr      r30,r5                          ; Save this in case we need it (only if promote fails)
        bl      EXT(mapSearchFull)              ; Go see if we can find it

        mr.     r3,r3                           ; Did we? (And remember mapping address for later)
        mr      r15,r4                          ; Save top of next vaddr
        mr      r16,r5                          ; Save bottom of next vaddr
        beq-    hrmPanic                        ; Nope, not found...

        cmpld   r3,r31                          ; Same mapping?
        bne-    hrmPanic                        ; Not good...

        la      r3,pmapSXlk(r28)                ; Point to the pmap search lock
        bl      sxlkPromote                     ; Try to promote shared to exclusive
        mr.     r3,r3                           ; Could we?
        mr      r3,r31                          ; Restore the mapping pointer
        beq+    hrmBDone2                       ; Yeah...

        la      r3,pmapSXlk(r28)                ; Point to the pmap search lock
        bl      sxlkConvert                     ; Convert shared to exclusive
        mr.     r3,r3                           ; Could we?
        bne--   hrmPanic                        ; Nope, we must have timed out...

        mr      r3,r28                          ; Pass in pmap to search
        mr      r4,r29                          ; High order of address
        mr      r5,r30                          ; Low order of address
        bl      EXT(mapSearchFull)              ; Rescan the list

        mr.     r3,r3                           ; Did we lose it when we converted?
        mr      r15,r4                          ; Save top of next vaddr
        mr      r16,r5                          ; Save bottom of next vaddr
        beq--   hrmPanic                        ; Yeah, we did, someone tossed it for us...

hrmBDone2:  bl      mapDrainBusy                ; Go wait until mapping is unused

        mr      r3,r28                          ; Get the pmap to remove from
        mr      r4,r31                          ; Point to the mapping
        bl      EXT(mapRemove)                  ; Remove the mapping from the list

        lwz     r4,pmapResidentCnt(r28)         ; Get the mapped page count
        la      r3,pmapSXlk(r28)                ; Point to the pmap search lock
        subi    r4,r4,1                         ; Drop down the mapped page count
        stw     r4,pmapResidentCnt(r28)         ; Set the mapped page count
        bl      sxlkUnlock                      ; Unlock the search list

        b       hrmRetn64                       ; We are all done, get out...

hrmBTLBlcm: li      r2,lgKillResv               ; Get space unreserve line
        stwcx.  r2,0,r2                         ; Unreserve it

hrmBTLBlcn: lwz     r2,0(r7)                    ; Get the TLBIE lock
        mr.     r2,r2                           ; Is it held?
        beq++   hrmBTLBlcl                      ; Nope...
        b       hrmBTLBlcn                      ; Yeah...

;
;       Guest shadow assist -- mapping remove
;
;       Method of operation:
;           o Locate the VMM extension block and the host pmap
;           o Obtain the host pmap's search lock exclusively
;           o Locate the requested mapping in the shadow hash table,
;             exit if not found
;           o If connected, disconnect the PTE and gather R&C to physent
;           o Locate and lock the physent
;           o Remove mapping from physent's chain
;           o Unlock physent
;           o Unlock pmap's search lock
;
;       Non-volatile registers on entry:
;           r17: caller's msr image
;           r19: sprg2 (feature flags)
;           r28: guest pmap's physical address
;           r29: high-order 32 bits of guest virtual address
;           r30: low-order 32 bits of guest virtual address
;
;       Non-volatile register usage:
;           r26: VMM extension block's physical address
;           r27: host pmap's physical address
;           r28: guest pmap's physical address
;           r29: physent's physical address
;           r30: guest virtual address
;           r31: guest mapping's physical address
;
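;       A rough C-level sketch of the flow above (hypothetical helper names;
;       only the mp/pmap field names and return codes are the kernel's own):
;
;           sxlkExclusive(&host_pmap->pmapSXlk);
;           grp = hash_group(vmm_ext, space_id ^ (gva >> 12));
;           mp  = slot_search(grp, space_id, gva);  /* GV_SLOTS slots per group */
;           if (mp == NULL)
;               return mapRtNotFnd;                 /* after dropping the lock */
;           if (!dormant(mp))
;               invalidate_pte(mp);                 /* gather R&C into physent */
;           pp = phys_lock(mp->mpPAddr);
;           unchain(pp, mp);                        /* drop from alias chain */
;           phys_unlock(pp);
;           mp->mpFlags = (mp->mpFlags & ~mpgFlags) | mpgFree;
;           sxlkUnlock(&host_pmap->pmapSXlk);
;           return mapRtGuest;
;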
        .align  5
hrmGuest:
        rlwinm  r30,r30,0,0xFFFFF000            ; Clean up low-order bits of 32-bit guest vaddr
        bt++    pf64Bitb,hrmG64                 ; Test for 64-bit machine
        lwz     r26,pmapVmmExtPhys+4(r28)       ; r26 <- VMM pmap extension block paddr
        lwz     r27,vmxHostPmapPhys+4(r26)      ; r27 <- host pmap's paddr
        b       hrmGStart                       ; Join common code

hrmG64: ld      r26,pmapVmmExtPhys(r28)         ; r26 <- VMM pmap extension block paddr
        ld      r27,vmxHostPmapPhys(r26)        ; r27 <- host pmap's paddr
        rldimi  r30,r29,32,0                    ; Insert high-order 32 bits of 64-bit guest vaddr

hrmGStart:  la      r3,pmapSXlk(r27)            ; r3 <- host pmap's search lock address
        bl      sxlkExclusive                   ; Get lock exclusive

        lwz     r3,vxsGrm(r26)                  ; Get mapping remove request count

        lwz     r9,pmapSpace(r28)               ; r9 <- guest space ID number
        la      r31,VMX_HPIDX_OFFSET(r26)       ; r31 <- base of hash page physical index
        srwi    r11,r30,12                      ; Form shadow hash:
        xor     r11,r9,r11                      ;   spaceID ^ (vaddr >> 12)
        rlwinm  r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
                                                ; Form index offset from hash page number
        add     r31,r31,r12                     ; r31 <- hash page index entry
        li      r0,(GV_SLOTS - 1)               ; Prepare to iterate over mapping slots
        mtctr   r0                              ;   in this group
        bt++    pf64Bitb,hrmG64Search           ; Separate handling for 64-bit search
        lwz     r31,4(r31)                      ; r31 <- hash page paddr
        rlwimi  r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
                                                ; r31 <- hash group paddr

        addi    r3,r3,1                         ; Increment remove request count
        stw     r3,vxsGrm(r26)                  ; Update remove request count

        lwz     r3,mpFlags(r31)                 ; r3 <- 1st mapping slot's flags
        lhz     r4,mpSpace(r31)                 ; r4 <- 1st mapping slot's space ID
        lwz     r5,mpVAddr+4(r31)               ; r5 <- 1st mapping slot's virtual address
        b       hrmG32SrchLp                    ; Let the search begin!

        .align  5
hrmG32SrchLp:
        mr      r6,r3                           ; r6 <- current mapping slot's flags
        lwz     r3,mpFlags+GV_SLOT_SZ(r31)      ; r3 <- next mapping slot's flags
        mr      r7,r4                           ; r7 <- current mapping slot's space ID
        lhz     r4,mpSpace+GV_SLOT_SZ(r31)      ; r4 <- next mapping slot's space ID
        clrrwi  r8,r5,12                        ; r8 <- current mapping slot's virtual addr w/o flags
        lwz     r5,mpVAddr+4+GV_SLOT_SZ(r31)    ; r5 <- next mapping slot's virtual addr
        rlwinm  r11,r6,0,mpgFree                ; Isolate guest free mapping flag
        xor     r7,r7,r9                        ; Compare space ID
        or      r0,r11,r7                       ; r0 <- !(free && space match)
        xor     r8,r8,r30                       ; Compare virtual address
        or.     r0,r0,r8                        ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                     ; Join common path on hit (r31 points to guest mapping)

        addi    r31,r31,GV_SLOT_SZ              ; r31 <- next mapping slot
        bdnz    hrmG32SrchLp                    ; Iterate

        mr      r6,r3                           ; r6 <- current mapping slot's flags
        clrrwi  r5,r5,12                        ; Remove flags from virtual address
        rlwinm  r11,r6,0,mpgFree                ; Isolate guest free mapping flag
        xor     r4,r4,r9                        ; Compare space ID
        or      r0,r11,r4                       ; r0 <- !(free && space match)
        xor     r5,r5,r30                       ; Compare virtual address
        or.     r0,r0,r5                        ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                     ; Join common path on hit (r31 points to guest mapping)
        b       hrmGSrchMiss                    ; No joy in our hash group

hrmG64Search:
        ld      r31,0(r31)                      ; r31 <- hash page paddr
        insrdi  r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
                                                ; r31 <- hash group paddr
        lwz     r3,mpFlags(r31)                 ; r3 <- 1st mapping slot's flags
        lhz     r4,mpSpace(r31)                 ; r4 <- 1st mapping slot's space ID
        ld      r5,mpVAddr(r31)                 ; r5 <- 1st mapping slot's virtual address
        b       hrmG64SrchLp                    ; Let the search begin!

        .align  5
hrmG64SrchLp:
        mr      r6,r3                           ; r6 <- current mapping slot's flags
        lwz     r3,mpFlags+GV_SLOT_SZ(r31)      ; r3 <- next mapping slot's flags
        mr      r7,r4                           ; r7 <- current mapping slot's space ID
        lhz     r4,mpSpace+GV_SLOT_SZ(r31)      ; r4 <- next mapping slot's space ID
        clrrdi  r8,r5,12                        ; r8 <- current mapping slot's virtual addr w/o flags
        ld      r5,mpVAddr+GV_SLOT_SZ(r31)      ; r5 <- next mapping slot's virtual addr
        rlwinm  r11,r6,0,mpgFree                ; Isolate guest free mapping flag
        xor     r7,r7,r9                        ; Compare space ID
        or      r0,r11,r7                       ; r0 <- !(free && space match)
        xor     r8,r8,r30                       ; Compare virtual address
        or.     r0,r0,r8                        ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                     ; Join common path on hit (r31 points to guest mapping)

        addi    r31,r31,GV_SLOT_SZ              ; r31 <- next mapping slot
        bdnz    hrmG64SrchLp                    ; Iterate

        mr      r6,r3                           ; r6 <- current mapping slot's flags
        clrrdi  r5,r5,12                        ; Remove flags from virtual address
        rlwinm  r11,r6,0,mpgFree                ; Isolate guest free mapping flag
        xor     r4,r4,r9                        ; Compare space ID
        or      r0,r11,r4                       ; r0 <- !(free && space match)
        xor     r5,r5,r30                       ; Compare virtual address
        or.     r0,r0,r5                        ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                     ; Join common path on hit (r31 points to guest mapping)
hrmGSrchMiss:
        lwz     r3,vxsGrmMiss(r26)              ; Get remove miss count
        li      r25,mapRtNotFnd                 ; Return not found
        addi    r3,r3,1                         ; Increment miss count
        stw     r3,vxsGrmMiss(r26)              ; Update miss count
        b       hrmGReturn                      ; Join guest return

        .align  5
hrmGSrchHit:
        rlwinm. r0,r6,0,mpgDormant              ; Is this entry dormant?
        bne     hrmGDormant                     ; Yes, nothing to disconnect

        lwz     r3,vxsGrmActive(r26)            ; Get active hit count
        addi    r3,r3,1                         ; Increment active hit count
        stw     r3,vxsGrmActive(r26)            ; Update hit count

        bt++    pf64Bitb,hrmGDscon64            ; Handle 64-bit disconnect separately
        bl      mapInvPte32                     ; Disconnect PTE, invalidate, gather ref and change
                                                ; r31 <- mapping's physical address
                                                ; r3  -> PTE slot physical address
                                                ; r4  -> High-order 32 bits of PTE
                                                ; r5  -> Low-order 32 bits of PTE
                                                ; r6  -> PCA
                                                ; r7  -> PCA physical address
        rlwinm  r2,r3,29,29,31                  ; Get PTE's slot number in the PTEG (8-byte PTEs)
        b       hrmGFreePTE                     ; Join 64-bit path to release the PTE
hrmGDscon64:
        bl      mapInvPte64                     ; Disconnect PTE, invalidate, gather ref and change
        rlwinm  r2,r3,28,29,31                  ; Get PTE's slot number in the PTEG (16-byte PTEs)
hrmGFreePTE:
        mr.     r3,r3                           ; Was there a valid PTE?
        beq     hrmGDormant                     ; No valid PTE, we're almost done
        lis     r0,0x8000                       ; Prepare free bit for this slot
        srw     r0,r0,r2                        ; Position free bit
        or      r6,r6,r0                        ; Set it in our PCA image
        lwz     r8,mpPte(r31)                   ; Get PTE offset
        rlwinm  r8,r8,0,~mpHValid               ; Make the offset invalid
        stw     r8,mpPte(r31)                   ; Save invalidated PTE offset
        eieio                                   ; Synchronize all previous updates (mapInvPtexx didn't)
        stw     r6,0(r7)                        ; Update PCA and unlock the PTEG

hrmGDormant:
        lwz     r3,mpPAddr(r31)                 ; r3 <- physical 4K-page number
        bl      mapFindLockPN                   ; Find 'n' lock this page's physent
        mr.     r29,r3                          ; Got lock on our physent?
        beq--   hrmGBadPLock                    ; No, time to bail out

        crset   cr1_eq                          ; cr1_eq <- previous link is the anchor
        bt++    pf64Bitb,hrmGRemove64           ; Use 64-bit version on 64-bit machine
        la      r11,ppLink+4(r29)               ; Point to chain anchor
        lwz     r9,ppLink+4(r29)                ; Get chain anchor
        rlwinm. r9,r9,0,~ppFlags                ; Remove flags, yielding 32-bit physical chain pointer
hrmGRemLoop:
        beq-    hrmGPEMissMiss                  ; End of chain, this is not good
        cmplw   r9,r31                          ; Is this the mapping to remove?
        lwz     r8,mpAlias+4(r9)                ; Get forward chain pointer
        bne     hrmGRemNext                     ; No, chain onward
        bt      cr1_eq,hrmGRemRetry             ; Mapping to remove is chained from anchor
        stw     r8,0(r11)                       ; Unchain gpv->phys mapping
        b       hrmGDelete                      ; Finish deleting mapping
hrmGRemRetry:
        lwarx   r0,0,r11                        ; Get previous link
        rlwimi  r0,r8,0,~ppFlags                ; Insert new forward pointer whilst preserving flags
        stwcx.  r0,0,r11                        ; Update previous link
        bne-    hrmGRemRetry                    ; Lost reservation, retry
        b       hrmGDelete                      ; Finish deleting mapping

hrmGRemNext:
        la      r11,mpAlias+4(r9)               ; Point to (soon to be) previous link
        crclr   cr1_eq                          ; ~cr1_eq <- previous link is not the anchor
        mr.     r9,r8                           ; Does next entry exist?
        b       hrmGRemLoop                     ; Carry on

hrmGRemove64:
        li      r7,ppLFAmask                    ; Get mask to clean up mapping pointer
        rotrdi  r7,r7,ppLFArrot                 ; Rotate clean up mask to get 0xF00000000000000F
        la      r11,ppLink(r29)                 ; Point to chain anchor
        ld      r9,ppLink(r29)                  ; Get chain anchor
        andc.   r9,r9,r7                        ; Remove flags, yielding 64-bit physical chain pointer
hrmGRem64Lp:
        beq--   hrmGPEMissMiss                  ; End of chain, this is not good
        cmpld   r9,r31                          ; Is this the mapping to remove?
        ld      r8,mpAlias(r9)                  ; Get forward chain pointer
        bne     hrmGRem64Nxt                    ; No mapping to remove, chain on, dude
        bt      cr1_eq,hrmGRem64Rt              ; Mapping to remove is chained from anchor
        std     r8,0(r11)                       ; Unchain gpv->phys mapping
        b       hrmGDelete                      ; Finish deleting mapping
hrmGRem64Rt:
        ldarx   r0,0,r11                        ; Get previous link
        and     r0,r0,r7                        ; Get flags
        or      r0,r0,r8                        ; Insert new forward pointer
        stdcx.  r0,0,r11                        ; Slam it back in
        bne--   hrmGRem64Rt                     ; Lost reservation, retry
        b       hrmGDelete                      ; Finish deleting mapping

        .align  5
hrmGRem64Nxt:
        la      r11,mpAlias(r9)                 ; Point to (soon to be) previous link
        crclr   cr1_eq                          ; ~cr1_eq <- previous link is not the anchor
        mr.     r9,r8                           ; Does next entry exist?
        b       hrmGRem64Lp                     ; Carry on

hrmGDelete:
        mr      r3,r29                          ; r3 <- physent addr
        bl      mapPhysUnlock                   ; Unlock physent chain
        lwz     r3,mpFlags(r31)                 ; Get mapping's flags
        rlwinm  r3,r3,0,~mpgFlags               ; Clear all guest flags
        ori     r3,r3,mpgFree                   ; Mark mapping free
        stw     r3,mpFlags(r31)                 ; Update flags
        li      r25,mapRtGuest                  ; Set return code to 'found guest mapping'

hrmGReturn:
        la      r3,pmapSXlk(r27)                ; r3 <- host pmap search lock phys addr
        bl      sxlkUnlock                      ; Release host pmap search lock

        mr      r3,r25                          ; r3 <- return code
        bt++    pf64Bitb,hrmGRtn64              ; Handle 64-bit separately
        mtmsr   r17                             ; Restore 'rupts, translation
        isync                                   ; Throw a small wrench into the pipeline
        b       hrmRetnCmn                      ; Nothing to do now but pop a frame and return
hrmGRtn64:  mtmsrd  r17                         ; Restore 'rupts, translation, 32-bit mode
        b       hrmRetnCmn                      ; Join common return

hrmGBadPLock:
hrmGPEMissMiss:
        lis     r0,hi16(Choke)                  ; Seen the arrow on the doorpost
        ori     r0,r0,lo16(Choke)               ; Sayin' "THIS LAND IS CONDEMNED"
        li      r3,failMapping                  ; All the way from New Orleans
        sc                                      ; To Jerusalem


/*
 * mapping *hw_purge_phys(physent) - remove a mapping from the system
 *
 * Upon entry, R3 contains a pointer to a physent.
 *
 * This function removes the first mapping from a physical entry
 * alias list.  It locks the list, extracts the vaddr and pmap from
 * the first entry.  It then jumps into the hw_rem_map function.
 * NOTE: since we jump into rem_map, we need to set up the stack
 * identically.  Also, we set the next parm to 0 so we do not
 * try to save a next vaddr.
 *
 * We return the virtual address of the removed mapping in
 * R3.
 *
 * Note that this is designed to be called from 32-bit mode with a stack.
 *
 * We disable translation and all interruptions here.  This keeps us
 * from having to worry about a deadlock due to having anything locked
 * and needing it to process a fault.
 *
 * Note that this must be done with both interruptions off and VM off.
 *
 * Remove mapping via physical page (mapping_purge)
 *
 *   1) lock physent
 *   2) extract vaddr and pmap
 *   3) unlock physent
 *   4) do "remove mapping via pmap"
 *
 */
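
/*
 * A rough C-level sketch of the steps above (hypothetical helper names;
 * the heavy lifting is the common hw_rem_map removal path):
 *
 *	mapping *hw_purge_phys(struct phys_entry *pp)
 *	{
 *		mapping *mp;
 *		pmap_t pmap;
 *		addr64_t va;
 *
 *		mapPhysLock(pp);
 *		mp = first_mapping(pp);			// head of the alias chain
 *		if (mp == NULL) {
 *			mapPhysUnlock(pp);
 *			return NULL;			// the asm returns mapRtEmpty
 *		}
 *		va   = mp->mpVAddr;			// vaddr/space of first entry
 *		pmap = pmap_from_space(mp->mpSpace);	// via the pmapTrans table
 *		mapPhysUnlock(pp);
 *		return hw_rem_map(pmap, va, NULL);	// next parm 0: no next vaddr
 *	}
 */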

        .align  5
        .globl  EXT(hw_purge_phys)

LEXT(hw_purge_phys)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
        mflr    r0                              ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)            ; Save a register
        stw     r16,FM_ARG0+0x04(r1)            ; Save a register
        stw     r17,FM_ARG0+0x08(r1)            ; Save a register
        stw     r18,FM_ARG0+0x0C(r1)            ; Save a register
        stw     r19,FM_ARG0+0x10(r1)            ; Save a register
        stw     r20,FM_ARG0+0x14(r1)            ; Save a register
        stw     r21,FM_ARG0+0x18(r1)            ; Save a register
        stw     r22,FM_ARG0+0x1C(r1)            ; Save a register
        stw     r23,FM_ARG0+0x20(r1)            ; Save a register
        stw     r24,FM_ARG0+0x24(r1)            ; Save a register
        stw     r25,FM_ARG0+0x28(r1)            ; Save a register
        li      r6,0                            ; Set no next address return
        stw     r26,FM_ARG0+0x2C(r1)            ; Save a register
        stw     r27,FM_ARG0+0x30(r1)            ; Save a register
        stw     r28,FM_ARG0+0x34(r1)            ; Save a register
        stw     r29,FM_ARG0+0x38(r1)            ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)            ; Save a register
        stw     r31,FM_ARG0+0x40(r1)            ; Save a register
        stw     r6,FM_ARG0+0x44(r1)             ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

        bl      EXT(mapSetUp)                   ; Turn off interrupts, translation, and possibly enter 64-bit

        bl      mapPhysLock                     ; Lock the physent

        bt++    pf64Bitb,hppSF                  ; skip if 64-bit (only they take the hint)

        lwz     r12,ppLink+4(r3)                ; Grab the pointer to the first mapping
        li      r0,ppFlags                      ; Set the bottom stuff to clear
        b       hppJoin                         ; Join the common...

hppSF:  li      r0,ppLFAmask
        ld      r12,ppLink(r3)                  ; Get the pointer to the first mapping
        rotrdi  r0,r0,ppLFArrot                 ; Rotate clean up mask to get 0xF00000000000000F

hppJoin:    andc.   r12,r12,r0                  ; Clean and test link
        beq--   hppNone                         ; There are no more mappings on physical page

        lis     r28,hi16(EXT(pmapTrans))        ; Get the top of the start of the pmap hash to pmap translate table
        lhz     r7,mpSpace(r12)                 ; Get the address space hash
        ori     r28,r28,lo16(EXT(pmapTrans))    ; Get the bottom of the start of the pmap hash to pmap translate table
        slwi    r0,r7,2                         ; Multiply space by 4
        lwz     r4,mpVAddr(r12)                 ; Get the top of the vaddr
        slwi    r7,r7,3                         ; Multiply space by 8
        lwz     r5,mpVAddr+4(r12)               ;   and the bottom
        add     r7,r7,r0                        ; Get correct displacement into translate table
        lwz     r28,0(r28)                      ; Get the actual translation map

        add     r28,r28,r7                      ; Point to the pmap translation

        bl      mapPhysUnlock                   ; Time to unlock the physical entry

        bt++    pf64Bitb,hppSF2                 ; skip if 64-bit (only they take the hint)

        lwz     r28,pmapPAddr+4(r28)            ; Get the physical address of the pmap
        b       hrmJoin                         ; Go remove the mapping...

hppSF2: ld      r28,pmapPAddr(r28)              ; Get the physical address of the pmap
        b       hrmJoin                         ; Go remove the mapping...

        .align  5

hppNone:    bl      mapPhysUnlock               ; Time to unlock the physical entry

        bt++    pf64Bitb,hppSF3                 ; skip if 64-bit (only they take the hint)...

        mtmsr   r11                             ; Restore enables/translation/etc.
        isync
        b       hppRetnCmn                      ; Join the common return code...

hppSF3: mtmsrd  r11                             ; Restore enables/translation/etc.
        isync

;
;       NOTE: we have not used any registers other than the volatiles to this point
;

hppRetnCmn: lwz     r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)     ; Restore the return

        li      r3,mapRtEmpty                   ; Physent chain is empty
        mtlr    r12                             ; Restore the return
        lwz     r1,0(r1)                        ; Pop the stack
        blr                                     ; Leave...

/*
 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 * Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 * a 64-bit quantity, it is a long long so it is in R4 and R5.
 *
 * We return the virtual address of the removed mapping in
 * R3.
 *
 * Note that this is designed to be called from 32-bit mode with a stack.
 *
 * We disable translation and all interruptions here.  This keeps us
 * from having to worry about a deadlock due to having anything locked
 * and needing it to process a fault.
 *
 * Note that this must be done with both interruptions off and VM off.
 *
 * Remove a mapping which can be reestablished by VM
 *
 */
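
/*
 * A rough C-level sketch of the search loop below (hypothetical helper
 * names; mapSearchFull and the common removal path are the real routines):
 *
 *	mapping *hw_purge_map(pmap_t pmap, addr64_t va, addr64_t *next)
 *	{
 *		mapping *mp;
 *		addr64_t nva = va;
 *
 *		sxlkExclusive(&pmap->pmapSXlk);
 *		for (;;) {
 *			mp = mapSearchFull(pmap, nva, &nva);
 *			if (mp != NULL && is_normal(mp) && !is_perm(mp)
 *			    && busy_count(mp) == 0)
 *				break;		// removable: join hrmGotX
 *			if (nva == 0)
 *				return NULL;	// end of pmap (after unlocking)
 *		}
 *		*next = nva;
 *		...			// common removal path
 *	}
 */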

        .align  5
        .globl  EXT(hw_purge_map)

LEXT(hw_purge_map)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
        mflr    r0                              ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)            ; Save a register
        stw     r16,FM_ARG0+0x04(r1)            ; Save a register
        stw     r17,FM_ARG0+0x08(r1)            ; Save a register
        stw     r18,FM_ARG0+0x0C(r1)            ; Save a register
        stw     r19,FM_ARG0+0x10(r1)            ; Save a register
        mfsprg  r19,2                           ; Get feature flags
        stw     r20,FM_ARG0+0x14(r1)            ; Save a register
        stw     r21,FM_ARG0+0x18(r1)            ; Save a register
        mtcrf   0x02,r19                        ; Move pf64Bit to cr6
        stw     r22,FM_ARG0+0x1C(r1)            ; Save a register
        stw     r23,FM_ARG0+0x20(r1)            ; Save a register
        stw     r24,FM_ARG0+0x24(r1)            ; Save a register
        stw     r25,FM_ARG0+0x28(r1)            ; Save a register
        stw     r26,FM_ARG0+0x2C(r1)            ; Save a register
        stw     r27,FM_ARG0+0x30(r1)            ; Save a register
        stw     r28,FM_ARG0+0x34(r1)            ; Save a register
        stw     r29,FM_ARG0+0x38(r1)            ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)            ; Save a register
        stw     r31,FM_ARG0+0x40(r1)            ; Save a register
        stw     r6,FM_ARG0+0x44(r1)             ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

#if DEBUG
        lwz     r11,pmapFlags(r3)               ; Get pmap's flags
        rlwinm. r11,r11,0,pmapVMgsaa            ; Is guest shadow assist active?
        bne     hpmPanic                        ; Call not valid for guest shadow assist pmap
#endif

        bt++    pf64Bitb,hpmSF1                 ; skip if 64-bit (only they take the hint)
        lwz     r9,pmapvr+4(r3)                 ; Get conversion mask
        b       hpmSF1x                         ; Done...

hpmSF1: ld      r9,pmapvr(r3)                   ; Get conversion mask

hpmSF1x:
        bl      EXT(mapSetUp)                   ; Turn off interrupts, translation, and possibly enter 64-bit

        xor     r28,r3,r9                       ; Convert the pmap to physical addressing

        mr      r17,r11                         ; Save the MSR

        la      r3,pmapSXlk(r28)                ; Point to the pmap search lock
        bl      sxlkExclusive                   ; Go get an exclusive lock on the mapping lists
        mr.     r3,r3                           ; Did we get the lock?
        bne--   hrmBadLock                      ; Nope...
;
;       Note that we do a full search (i.e., no shortcut level skips, etc.)
;       here so that we will know the previous elements so we can dequeue them
;       later.
;
hpmSearch:
        mr      r3,r28                          ; Pass in pmap to search
        mr      r29,r4                          ; Top half of vaddr
        mr      r30,r5                          ; Bottom half of vaddr
        bl      EXT(mapSearchFull)              ; Rescan the list
        mr.     r31,r3                          ; Did we? (And remember mapping address for later)
        or      r0,r4,r5                        ; Are we beyond the end?
        mr      r15,r4                          ; Save top of next vaddr
        cmplwi  cr1,r0,0                        ; See if there is another
        mr      r16,r5                          ; Save bottom of next vaddr
        bne--   hpmGotOne                       ; We found one, go check it out...

hpmCNext:   bne++   cr1,hpmSearch               ; There is another to check...
        b       hrmNotFound                     ; No more in pmap to check...

hpmGotOne:  lwz     r20,mpFlags(r3)             ; Get the flags
        andi.   r0,r20,lo16(mpType|mpPerm)      ; cr0_eq <- normal mapping && !permanent
        rlwinm  r21,r20,8,24,31                 ; Extract the busy count
        cmplwi  cr2,r21,0                       ; Is it busy?
        crand   cr0_eq,cr2_eq,cr0_eq            ; Not busy and can be removed?
        beq++   hrmGotX                         ; Found, branch to remove the mapping...
        b       hpmCNext                        ; Nope...

hpmPanic:   lis     r0,hi16(Choke)              ; System abend
        ori     r0,r0,lo16(Choke)               ; System abend
        li      r3,failMapping                  ; Show that we failed some kind of mapping thing
        sc

/*
 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
 *
 * Upon entry, R3 contains a pointer to the physent and R4 contains a
 * pointer to the pmap.
 *
 * This function removes the first mapping for a specific pmap from a physical entry
 * alias list.  It locks the list, extracts the vaddr and pmap from
 * the first appropriate entry.  It then jumps into the hw_rem_map function.
 * NOTE: since we jump into rem_map, we need to set up the stack
 * identically.  Also, we set the next parm to 0 so we do not
 * try to save a next vaddr.
 *
 * We return the virtual address of the removed mapping in
 * R3.
 *
 * Note that this is designed to be called from 32-bit mode with a stack.
 *
 * We disable translation and all interruptions here.  This keeps us
 * from having to worry about a deadlock due to having anything locked
 * and needing it to process a fault.
 *
 * Note that this must be done with both interruptions off and VM off.
 *
 * Remove mapping via physical page (mapping_purge)
 *
 *   1) lock physent
 *   2) extract vaddr and pmap
 *   3) unlock physent
 *   4) do "remove mapping via pmap"
 *
 */
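
/*
 * A rough C-level sketch (hypothetical helper names):
 *
 *	mapping *hw_purge_space(struct phys_entry *pp, pmap_t pmap)
 *	{
 *		unsigned int space = pmap->pmapSpace;
 *		mapping *mp;
 *		addr64_t va;
 *
 *		mapPhysLock(pp);
 *		for (mp = first_mapping(pp); mp != NULL; mp = next_alias(mp))
 *			if (mp->mpSpace == space)
 *				break;
 *		if (mp == NULL) {
 *			mapPhysUnlock(pp);
 *			return NULL;			// the asm returns mapRtEmpty
 *		}
 *		va = mp->mpVAddr;			// grab the vaddr before unlocking
 *		mapPhysUnlock(pp);
 *		return hw_rem_map(pmap, va, NULL);	// join the common removal path
 *	}
 */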

        .align  5
        .globl  EXT(hw_purge_space)

LEXT(hw_purge_space)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
        mflr    r0                              ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)            ; Save a register
        stw     r16,FM_ARG0+0x04(r1)            ; Save a register
        stw     r17,FM_ARG0+0x08(r1)            ; Save a register
        mfsprg  r2,2                            ; Get feature flags
        stw     r18,FM_ARG0+0x0C(r1)            ; Save a register
        stw     r19,FM_ARG0+0x10(r1)            ; Save a register
        stw     r20,FM_ARG0+0x14(r1)            ; Save a register
        stw     r21,FM_ARG0+0x18(r1)            ; Save a register
        stw     r22,FM_ARG0+0x1C(r1)            ; Save a register
        mtcrf   0x02,r2                         ; Move pf64Bit to cr6
        stw     r23,FM_ARG0+0x20(r1)            ; Save a register
        stw     r24,FM_ARG0+0x24(r1)            ; Save a register
        stw     r25,FM_ARG0+0x28(r1)            ; Save a register
        stw     r26,FM_ARG0+0x2C(r1)            ; Save a register
        stw     r27,FM_ARG0+0x30(r1)            ; Save a register
        li      r6,0                            ; Set no next address return
        stw     r28,FM_ARG0+0x34(r1)            ; Save a register
        stw     r29,FM_ARG0+0x38(r1)            ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)            ; Save a register
        stw     r31,FM_ARG0+0x40(r1)            ; Save a register
        stw     r6,FM_ARG0+0x44(r1)             ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

#if DEBUG
        lwz     r11,pmapFlags(r4)               ; Get pmap's flags
        rlwinm. r11,r11,0,pmapVMgsaa            ; Is guest shadow assist active?
        bne     hpsPanic                        ; Call not valid for guest shadow assist pmap
#endif

        bt++    pf64Bitb,hpsSF1                 ; skip if 64-bit (only they take the hint)

        lwz     r9,pmapvr+4(r4)                 ; Get conversion mask for pmap

        b       hpsSF1x                         ; Done...

hpsSF1: ld      r9,pmapvr(r4)                   ; Get conversion mask for pmap

hpsSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

        xor     r4,r4,r9                        ; Convert the pmap to physical addressing

        bl      mapPhysLock                     ; Lock the physent

        lwz     r8,pmapSpace(r4)                ; Get the space hash

        bt++    pf64Bitb,hpsSF                  ; skip if 64-bit (only they take the hint)

        lwz     r12,ppLink+4(r3)                ; Grab the pointer to the first mapping

hpsSrc32:   rlwinm. r12,r12,0,~ppFlags          ; Clean and test mapping address
        beq     hpsNone                         ; Did not find one...

        lhz     r10,mpSpace(r12)                ; Get the space

        cmplw   r10,r8                          ; Is this one of ours?
        beq     hpsFnd                          ; Yes...

        lwz     r12,mpAlias+4(r12)              ; Chain on to the next
        b       hpsSrc32                        ; Check it out...

        .align  5

hpsSF:  li      r0,ppLFAmask
        ld      r12,ppLink(r3)                  ; Get the pointer to the first mapping
        rotrdi  r0,r0,ppLFArrot                 ; Rotate clean up mask to get 0xF00000000000000F

hpsSrc64:   andc.   r12,r12,r0                  ; Clean and test mapping address
        beq     hpsNone                         ; Did not find one...

        lhz     r10,mpSpace(r12)                ; Get the space

        cmplw   r10,r8                          ; Is this one of ours?
        beq     hpsFnd                          ; Yes...

        ld      r12,mpAlias(r12)                ; Chain on to the next
        b       hpsSrc64                        ; Check it out...

        .align  5

hpsFnd: mr      r28,r4                          ; Set the pmap physical address
        lwz     r4,mpVAddr(r12)                 ; Get the top of the vaddr
        lwz     r5,mpVAddr+4(r12)               ;   and the bottom

        bl      mapPhysUnlock                   ; Time to unlock the physical entry
        b       hrmJoin                         ; Go remove the mapping...

        .align  5

hpsNone:    bl      mapPhysUnlock               ; Time to unlock the physical entry

        bt++    pf64Bitb,hpsSF3                 ; skip if 64-bit (only they take the hint)...

        mtmsr   r11                             ; Restore enables/translation/etc.
        isync
        b       hpsRetnCmn                      ; Join the common return code...

hpsSF3: mtmsrd  r11                             ; Restore enables/translation/etc.
        isync

;
;       NOTE: we have not used any registers other than the volatiles to this point
;

hpsRetnCmn: lwz     r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)     ; Restore the return

        li      r3,mapRtEmpty                   ; No mappings for specified pmap on physent chain
        mtlr    r12                             ; Restore the return
        lwz     r1,0(r1)                        ; Pop the stack
        blr                                     ; Leave...

hpsPanic:   lis     r0,hi16(Choke)              ; System abend
        ori     r0,r0,lo16(Choke)               ; System abend
        li      r3,failMapping                  ; Show that we failed some kind of mapping thing
        sc

/*
 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
 *                                          on this physent chain
 *
 * Locates the first guest mapping on the physent chain that is associated with the
 * specified host pmap.  If this succeeds, the mapping is removed by joining the general
 * remove path; otherwise, we return NULL.  The caller is expected to invoke this entry
 * repeatedly until no additional guest mappings that match our criteria are removed.
 *
 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
 *
 * Parameters:
 *	r3 : physent, 32-bit kernel virtual address
 *	r4 : host pmap, 32-bit kernel virtual address
 *
 * Volatile register usage (for linkage through hrmJoin):
 *	r4 : high-order 32 bits of guest virtual address
 *	r5 : low-order 32 bits of guest virtual address
 *	r11: saved MSR image
 *
 * Non-volatile register usage:
 *	r26: VMM extension block's physical address
 *	r27: host pmap's physical address
 *	r28: guest pmap's physical address
 *
 */
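
/*
 * A rough C-level sketch of the scan below (hypothetical helper names).
 * A mapping qualifies when it is a guest mapping whose guest pmap hangs
 * off the same VMM extension block as the specified host pmap:
 *
 *	mapping *hw_scrub_guest(struct phys_entry *pp, pmap_t host)
 *	{
 *		void *vmext = host->pmapVmmExtPhys;
 *		pmap_t guest = NULL;
 *		mapping *mp;
 *
 *		mapPhysLock(pp);
 *		for (mp = first_mapping(pp); mp != NULL; mp = next_alias(mp)) {
 *			guest = pmap_from_space(mp->mpSpace);	// via pmapTrans
 *			if (mp_type(mp) == mpGuest &&
 *			    guest->pmapVmmExtPhys == vmext)
 *				break;
 *		}
 *		mapPhysUnlock(pp);
 *		if (mp == NULL)
 *			return NULL;				// asm returns mapRtEmpty
 *		return hw_rem_map(guest, mp->mpVAddr, NULL);	// join hrmJoin
 *	}
 */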

        .align  5
        .globl  EXT(hw_scrub_guest)

LEXT(hw_scrub_guest)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
        mflr    r0                              ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)            ; Save a register
        stw     r16,FM_ARG0+0x04(r1)            ; Save a register
        stw     r17,FM_ARG0+0x08(r1)            ; Save a register
        mfsprg  r2,2                            ; Get feature flags
        stw     r18,FM_ARG0+0x0C(r1)            ; Save a register
        stw     r19,FM_ARG0+0x10(r1)            ; Save a register
        stw     r20,FM_ARG0+0x14(r1)            ; Save a register
        stw     r21,FM_ARG0+0x18(r1)            ; Save a register
        stw     r22,FM_ARG0+0x1C(r1)            ; Save a register
        mtcrf   0x02,r2                         ; Move pf64Bit to cr6
        stw     r23,FM_ARG0+0x20(r1)            ; Save a register
        stw     r24,FM_ARG0+0x24(r1)            ; Save a register
        stw     r25,FM_ARG0+0x28(r1)            ; Save a register
        stw     r26,FM_ARG0+0x2C(r1)            ; Save a register
        stw     r27,FM_ARG0+0x30(r1)            ; Save a register
        li      r6,0                            ; Set no next address return
        stw     r28,FM_ARG0+0x34(r1)            ; Save a register
        stw     r29,FM_ARG0+0x38(r1)            ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)            ; Save a register
        stw     r31,FM_ARG0+0x40(r1)            ; Save a register
        stw     r6,FM_ARG0+0x44(r1)             ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

        lwz     r11,pmapVmmExt(r4)              ; Get VMM pmap extension block vaddr

        bt++    pf64Bitb,hsg64Salt              ; Test for 64-bit machine
        lwz     r26,pmapVmmExtPhys+4(r4)        ; Get VMM pmap extension block paddr
        lwz     r9,pmapvr+4(r4)                 ; Get 32-bit virt<->real conversion salt
        b       hsgStart                        ; Get to work

hsg64Salt:  ld      r26,pmapVmmExtPhys(r4)      ; Get VMM pmap extension block paddr
        ld      r9,pmapvr(r4)                   ; Get 64-bit virt<->real conversion salt

hsgStart:   bl      EXT(mapSetUp)               ; Disable 'rupts, translation, enter 64-bit mode
        xor     r27,r4,r9                       ; Convert host pmap_t virt->real
        bl      mapPhysLock                     ; Lock the physent

        bt++    pf64Bitb,hsg64Scan              ; Test for 64-bit machine

        lwz     r12,ppLink+4(r3)                ; Grab the pointer to the first mapping
hsg32Loop:  rlwinm. r12,r12,0,~ppFlags          ; Clean and test mapping address
        beq     hsg32Miss                       ; Did not find one...
        lwz     r8,mpFlags(r12)                 ; Get mapping's flags
        lhz     r7,mpSpace(r12)                 ; Get mapping's space id
        rlwinm  r8,r8,0,mpType                  ; Extract mapping's type code
        lis     r28,hi16(EXT(pmapTrans))        ; Get the top of the start of the pmap hash to pmap translate table
        xori    r8,r8,mpGuest                   ; Is it a guest mapping?
        ori     r28,r28,lo16(EXT(pmapTrans))    ; Get the bottom of the start of the pmap hash to pmap translate table
        slwi    r9,r7,2                         ; Multiply space by 4
        lwz     r28,0(r28)                      ; Get the actual translation map
        lwz     r4,mpVAddr(r12)                 ; Get the top of the vaddr
        slwi    r7,r7,3                         ; Multiply space by 8
        lwz     r5,mpVAddr+4(r12)               ; Get the bottom of the vaddr
        add     r7,r7,r9                        ; Get correct displacement into translate table
        add     r28,r28,r7                      ; Point to the pmap translation
        lwz     r28,pmapPAddr+4(r28)            ; Get guest pmap paddr
        lwz     r7,pmapVmmExtPhys+4(r28)        ; Get VMM extension block paddr
        xor     r7,r7,r26                       ; Is guest associated with specified host?
        or.     r7,r7,r8                        ; Guest mapping && associated with host?
        lwz     r12,mpAlias+4(r12)              ; Chain on to the next
        bne     hsg32Loop                       ; Try next mapping on alias chain

hsg32Hit:   bl      mapPhysUnlock               ; Unlock physent chain
        b       hrmJoin                         ; Join common path for mapping removal

        .align  5
hsg32Miss:  bl      mapPhysUnlock               ; Unlock physent chain
        mtmsr   r11                             ; Restore 'rupts, translation
        isync                                   ; Throw a small wrench into the pipeline
        li      r3,mapRtEmpty                   ; No mappings found matching specified criteria
        b       hrmRetnCmn                      ; Exit through common epilog

        .align  5
hsg64Scan:  li      r6,ppLFAmask                ; Get lock, flag, attribute mask seed
        ld      r12,ppLink(r3)                  ; Grab the pointer to the first mapping
        rotrdi  r6,r6,ppLFArrot                 ; Rotate clean up mask to get 0xF00000000000000F
hsg64Loop:  andc.   r12,r12,r6                  ; Clean and test mapping address
        beq     hsg64Miss                       ; Did not find one...
        lwz     r8,mpFlags(r12)                 ; Get mapping's flags
        lhz     r7,mpSpace(r12)                 ; Get mapping's space id
        rlwinm  r8,r8,0,mpType                  ; Extract mapping's type code
        lis     r28,hi16(EXT(pmapTrans))        ; Get the top of the start of the pmap hash to pmap translate table
        xori    r8,r8,mpGuest                   ; Is it a guest mapping?
        ori     r28,r28,lo16(EXT(pmapTrans))    ; Get the bottom of the start of the pmap hash to pmap translate table
        slwi    r9,r7,2                         ; Multiply space by 4
        lwz     r28,0(r28)                      ; Get the actual translation map
        lwz     r4,mpVAddr(r12)                 ; Get the top of the vaddr
        slwi    r7,r7,3                         ; Multiply space by 8
        lwz     r5,mpVAddr+4(r12)               ; Get the bottom of the vaddr
        add     r7,r7,r9                        ; Get correct displacement into translate table
        add     r28,r28,r7                      ; Point to the pmap translation
        ld      r28,pmapPAddr(r28)              ; Get guest pmap paddr
        ld      r7,pmapVmmExtPhys(r28)          ; Get VMM extension block paddr
        xor     r7,r7,r26                       ; Is guest associated with specified host?
        or.     r7,r7,r8                        ; Guest mapping && associated with host?
        ld      r12,mpAlias(r12)                ; Chain on to the next
        bne     hsg64Loop                       ; Try next mapping on alias chain

hsg64Hit:   bl      mapPhysUnlock               ; Unlock physent chain
        b       hrmJoin                         ; Join common path for mapping removal

        .align  5
hsg64Miss:  bl      mapPhysUnlock               ; Unlock physent chain
        mtmsrd  r11                             ; Restore 'rupts, translation
        li      r3,mapRtEmpty                   ; No mappings found matching specified criteria
        b       hrmRetnCmn                      ; Exit through common epilog

/*
 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
 *
 * Upon entry, R3 contains a pointer to a physent.
 * space is the space ID from the pmap in question.
 *
 * We return the virtual address of the found mapping in
 * R3.  Note that the mapping's busy count is bumped.
 *
 * Note that this is designed to be called from 32-bit mode with a stack.
 *
 * We disable translation and all interruptions here.  This keeps us
 * from having to worry about a deadlock due to having anything locked
 * and needing it to process a fault.
 *
 */
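
/*
 * A rough C-level sketch (hypothetical helper names):
 *
 *	mapping *hw_find_space(struct phys_entry *pp, unsigned int space)
 *	{
 *		mapping *mp;
 *
 *		mapPhysLock(pp);
 *		for (mp = first_mapping(pp); mp != NULL; mp = next_alias(mp))
 *			if (mp->mpSpace == space)
 *				break;
 *		if (mp != NULL)
 *			mapBumpBusy(mp);	// keep it from disappearing
 *		mapPhysUnlock(pp);
 *		return phys_to_virt(mp);	// via mbvrswap; NULL if none found
 *	}
 */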

        .align  5
        .globl  EXT(hw_find_space)

LEXT(hw_find_space)
        stwu    r1,-(FM_SIZE)(r1)               ; Make some space on the stack
        mflr    r0                              ; Save the link register
        mr      r8,r4                           ; Remember the space
        stw     r0,(FM_SIZE+FM_LR_SAVE)(r1)     ; Save the return

        bl      EXT(mapSetUp)                   ; Turn off interrupts, translation, and possibly enter 64-bit

        bl      mapPhysLock                     ; Lock the physent

        bt++    pf64Bitb,hfsSF                  ; skip if 64-bit (only they take the hint)

        lwz     r12,ppLink+4(r3)                ; Grab the pointer to the first mapping

hfsSrc32:   rlwinm. r12,r12,0,~ppFlags          ; Clean and test mapping address
        beq     hfsNone                         ; Did not find one...

        lhz     r10,mpSpace(r12)                ; Get the space

        cmplw   r10,r8                          ; Is this one of ours?
        beq     hfsFnd                          ; Yes...

        lwz     r12,mpAlias+4(r12)              ; Chain on to the next
        b       hfsSrc32                        ; Check it out...

        .align  5

hfsSF:  li      r0,ppLFAmask
        ld      r12,ppLink(r3)                  ; Get the pointer to the first mapping
        rotrdi  r0,r0,ppLFArrot                 ; Rotate clean up mask to get 0xF00000000000000F

hfsSrc64:   andc.   r12,r12,r0                  ; Clean and test mapping address
        beq     hfsNone                         ; Did not find one...

        lhz     r10,mpSpace(r12)                ; Get the space

        cmplw   r10,r8                          ; Is this one of ours?
        beq     hfsFnd                          ; Yes...

        ld      r12,mpAlias(r12)                ; Chain on to the next
        b       hfsSrc64                        ; Check it out...

        .align  5

hfsFnd: mr      r8,r3                           ; Save the physent
        mr      r3,r12                          ; Point to the mapping
        bl      mapBumpBusy                     ; If we found it, bump up the busy count so the mapping does not disappear

        mr      r3,r8                           ; Get back the physical entry
        li      r7,0xFFF                        ; Get a page size mask
        bl      mapPhysUnlock                   ; Time to unlock the physical entry

        andc    r3,r12,r7                       ; Move the mapping back down to a page
        lwz     r3,mbvrswap+4(r3)               ; Get last half of virtual to real swap
        xor     r12,r3,r12                      ; Convert to virtual
        b       hfsRet                          ; Time to return

        .align  5

hfsNone:    bl      mapPhysUnlock               ; Time to unlock the physical entry

hfsRet: bt++    pf64Bitb,hfsSF3                 ; skip if 64-bit (only they take the hint)...

        mtmsr   r11                             ; Restore enables/translation/etc.
        isync
        b       hfsRetnCmn                      ; Join the common return code...

hfsSF3: mtmsrd  r11                             ; Restore enables/translation/etc.
        isync

;
;       NOTE: we have not used any registers other than the volatiles to this point
;

hfsRetnCmn: mr      r3,r12                      ; Get the mapping or a 0 if we failed

#if DEBUG
        mr.     r3,r3                           ; Anything to return?
        beq     hfsRetnNull                     ; Nope
        lwz     r11,mpFlags(r3)                 ; Get mapping flags
        rlwinm  r0,r11,0,mpType                 ; Isolate the mapping type
        cmplwi  r0,mpGuest                      ; Shadow guest mapping?
        beq     hfsPanic                        ; Yup, kick the bucket
hfsRetnNull:
#endif

        lwz     r12,(FM_SIZE+FM_LR_SAVE)(r1)    ; Restore the return

        mtlr    r12                             ; Restore the return
        lwz     r1,0(r1)                        ; Pop the stack
        blr                                     ; Leave...

hfsPanic:   lis     r0,hi16(Choke)              ; System abend
        ori     r0,r0,lo16(Choke)               ; System abend
        li      r3,failMapping                  ; Show that we failed some kind of mapping thing
        sc

;
;       mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap.
;       Returns 0 if not found, or the virtual address of the mapping if
;       it is.  Also, the mapping has the busy count bumped.
;
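;       A rough C-level sketch (hypothetical helper names):
;
;           mapping *hw_find_map(pmap_t pmap, addr64_t va, addr64_t *nextva)
;           {
;               mapping *mp;
;
;               sxlkShared(&pmap->pmapSXlk);
;               mp = mapSearch(pmap, va, nextva);
;               if (mp != NULL && !remove_in_progress(mp))
;                   mapBumpBusy(mp);            /* caller must drop the busy count */
;               else
;                   mp = NULL;
;               sxlkUnlock(&pmap->pmapSXlk);
;               return mp;                      /* flipped back to a virtual address */
;           }
;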
2355 | .align 5 | |
2356 | .globl EXT(hw_find_map) | |
1c79356b | 2357 | |
55e303ae A |
2358 | LEXT(hw_find_map) |
2359 | stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack | |
2360 | mflr r0 ; Save the link register | |
2361 | stw r25,FM_ARG0+0x00(r1) ; Save a register | |
2362 | stw r26,FM_ARG0+0x04(r1) ; Save a register | |
2363 | mr r25,r6 ; Remember address of next va | |
2364 | stw r27,FM_ARG0+0x08(r1) ; Save a register | |
2365 | stw r28,FM_ARG0+0x0C(r1) ; Save a register | |
2366 | stw r29,FM_ARG0+0x10(r1) ; Save a register | |
2367 | stw r30,FM_ARG0+0x14(r1) ; Save a register | |
2368 | stw r31,FM_ARG0+0x18(r1) ; Save a register | |
2369 | stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
1c79356b | 2370 | |
91447636 A |
2371 | #if DEBUG |
2372 | lwz r11,pmapFlags(r3) ; Get pmaps flags | |
2373 | rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active? | |
2374 | bne hfmPanic ; Call not valid for guest shadow assist pmap | |
2375 | #endif | |
2376 | ||
55e303ae A |
2377 | lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap |
2378 | lwz r7,pmapvr+4(r3) ; Get the second part | |
1c79356b | 2379 | |
1c79356b | 2380 | |
55e303ae A |
2381 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit |
2382 | ||
2383 | mr r27,r11 ; Remember the old MSR | |
2384 | mr r26,r12 ; Remember the feature bits | |
9bccf70c | 2385 | |
55e303ae | 2386 | xor r28,r3,r7 ; Change the common 32- and 64-bit half |
9bccf70c | 2387 | |
55e303ae | 2388 | bf-- pf64Bitb,hfmSF1 ; skip if 32-bit... |
1c79356b | 2389 | |
55e303ae | 2390 | rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top |
1c79356b | 2391 | |
55e303ae A |
2392 | hfmSF1: mr r29,r4 ; Save top half of vaddr |
2393 | mr r30,r5 ; Save the bottom half | |
2394 | ||
2395 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
2396 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
2397 | mr. r3,r3 ; Did we get the lock? | |
2398 | bne-- hfmBadLock ; Nope... | |
1c79356b | 2399 | |
55e303ae A |
2400 | mr r3,r28 ; get the pmap address |
2401 | mr r4,r29 ; Get bits 0:31 to look for | |
2402 | mr r5,r30 ; Get bits 32:64 | |
2403 | ||
2404 | bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags) | |
1c79356b | 2405 | |
55e303ae A |
2406 | rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit |
2407 | mr. r31,r3 ; Save the mapping if we found it | |
2408 | cmplwi cr1,r0,0 ; Are we removing? | |
2409 | mr r29,r4 ; Save next va high half | |
2410 | crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing | |
2411 | mr r30,r5 ; Save next va low half | |
2412 | li r6,0 ; Assume we did not find it | |
2413 | li r26,0xFFF ; Get a mask to relocate to start of mapping page | |
1c79356b | 2414 | |
55e303ae | 2415 | bt-- cr0_eq,hfmNotFnd ; We did not find it... |
1c79356b | 2416 | |
55e303ae | 2417 | bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear |
1c79356b | 2418 | |
55e303ae | 2419 | andc r4,r31,r26 ; Get back to the mapping page start |
1c79356b | 2420 | |
55e303ae A |
2421 | ; Note: we can treat 32- and 64-bit the same here. Because we are going from |
2422 | ; physical to virtual and we only do 32-bit virtual, we only need the low order | |
2423 | ; word of the xor. | |
d7e50217 | 2424 | |
55e303ae A |
2425 | lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap |
2426 | li r6,-1 ; Indicate we found it and it is not being removed | |
2427 | xor r31,r31,r4 ; Flip to virtual | |
d7e50217 | 2428 | |
55e303ae A |
2429 | hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
2430 | bl sxlkUnlock ; Unlock the search list | |
d7e50217 | 2431 | |
55e303ae A |
2432 | rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit |
2433 | and r3,r3,r6 ; Clear if not found or removing | |
de355530 | 2434 | |
55e303ae | 2435 | hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes... |
de355530 | 2436 | |
55e303ae A |
2437 | mtmsr r27 ; Restore enables/translation/etc. |
2438 | isync | |
2439 | b hfmReturnC ; Join common... | |
2440 | ||
2441 | hfmR64: mtmsrd r27 ; Restore enables/translation/etc. | |
2442 | isync | |
2443 | ||
2444 | hfmReturnC: stw r29,0(r25) ; Save the top of the next va | |
2445 | stw r30,4(r25) ; Save the bottom of the next va | |
2446 | lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return | |
2447 | lwz r25,FM_ARG0+0x00(r1) ; Restore a register | |
2448 | lwz r26,FM_ARG0+0x04(r1) ; Restore a register | |
2449 | and r3,r3,r6 ; Clear return if the mapping is being removed | |
2450 | lwz r27,FM_ARG0+0x08(r1) ; Restore a register | |
2451 | mtlr r0 ; Restore the return | |
2452 | lwz r28,FM_ARG0+0x0C(r1) ; Restore a register | |
2453 | lwz r29,FM_ARG0+0x10(r1) ; Restore a register | |
2454 | lwz r30,FM_ARG0+0x14(r1) ; Restore a register | |
2455 | lwz r31,FM_ARG0+0x18(r1) ; Restore a register | |
2456 | lwz r1,0(r1) ; Pop the stack | |
2457 | blr ; Leave... | |
2458 | ||
2459 | .align 5 | |
2460 | ||
2461 | hfmBadLock: li r3,1 ; Set lock time out error code | |
2462 | b hfmReturn ; Leave.... | |
1c79356b | 2463 | |
91447636 A |
2464 | hfmPanic: lis r0,hi16(Choke) ; System abend |
2465 | ori r0,r0,lo16(Choke) ; System abend | |
2466 | li r3,failMapping ; Show that we failed some kind of mapping thing | |
2467 | sc | |
2468 | ||
2469 | ||
2470 | /* | |
2471 | * void hw_clear_maps(void) | |
2472 | * | |
2473 | * Walks every mapping of every physical page and invalidates the | |
2474 | * mapping's cached PTE pointer (clears mpHValid in mpPte). | |
2475 | * | |
2476 | */ | |
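;
; The walk below, rendered illustratively as C; struct and field names mirror
; the assembly offsets (mrPhysTab, mrStart, mrEnd, ppLink, mpPte, mpAlias) and
; are assumptions about the real layouts, as is the CLEAN() helper:
;
;     for (rgn = pmap_mem_regions; rgn->mrPhysTab != 0; rgn++) {
;         struct phys_entry *pp = rgn->mrPhysTab;
;         unsigned int n = rgn->mrEnd - rgn->mrStart + 1;
;         while (n--) {                                    // each page in the bank
;             struct mapping *mp = CLEAN(pp->ppLink);      // strip flag/lock bits
;             for (; mp != 0; mp = CLEAN(mp->mpAlias))
;                 mp->mpPte &= ~mpHValid;                  // forget the cached PTE pointer
;             pp++;
;         }
;     }
;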
2477 | ||
2478 | .align 5 | |
2479 | .globl EXT(hw_clear_maps) | |
2480 | ||
2481 | LEXT(hw_clear_maps) | |
2482 | mflr r10 ; Save the link register | |
2483 | mfcr r9 ; Save the condition register | |
2484 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit | |
2485 | ||
2486 | lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
2487 | ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
2488 | ||
2489 | hcmNextRegion: | |
2490 | lwz r3,mrPhysTab(r5) ; Get the actual table address | |
2491 | lwz r0,mrStart(r5) ; Get start of table entry | |
2492 | lwz r4,mrEnd(r5) ; Get end of table entry | |
2493 | addi r5,r5,mrSize ; Point to the next region | |
2494 | ||
2495 | cmplwi r3,0 ; No more regions? | |
2496 | beq-- hcmDone ; Leave... | |
2497 | ||
2498 | sub r4,r4,r0 ; Calculate physical entry count | |
2499 | addi r4,r4,1 | |
2500 | mtctr r4 | |
2501 | ||
2502 | bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version | |
2503 | ||
2504 | ||
2505 | hcmNextPhys32: | |
2506 | lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping | |
2507 | addi r3,r3,physEntrySize ; Next phys_entry | |
2508 | ||
2509 | hcmNextMap32: | |
3a60a9f5 | 2510 | rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address |
91447636 A |
2511 | beq hcmNoMap32 ; Did not find one... |
2512 | ||
2513 | lwz r0,mpPte(r4) ; Grab the offset to the PTE | |
2514 | rlwinm r0,r0,0,~mpHValid ; Clear out valid bit | |
2515 | stw r0,mpPte(r4) ; Store back the invalidated quick pointer | |
2516 | ||
2517 | lwz r4,mpAlias+4(r4) ; Chain on to the next | |
2518 | b hcmNextMap32 ; Check it out... | |
2519 | hcmNoMap32: | |
2520 | bdnz hcmNextPhys32 | |
2521 | b hcmNextRegion | |
2522 | ||
2523 | ||
2524 | .align 5 | |
2525 | hcmNextPhys64: | |
2526 | li r0,ppLFAmask ; Get mask to clean up mapping pointer | |
2527 | ld r4,ppLink(r3) ; Get the pointer to the first mapping | |
2528 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
2529 | addi r3,r3,physEntrySize ; Next phys_entry | |
2530 | ||
2531 | hcmNextMap64: | |
2532 | andc. r4,r4,r0 ; Clean and test mapping address | |
2533 | beq hcmNoMap64 ; Did not find one... | |
2534 | ||
2535 | lwz r0,mpPte(r4) ; Grab the offset to the PTE | |
2536 | rlwinm r0,r0,0,~mpHValid ; Clear out valid bit | |
2537 | stw r0,mpPte(r4) ; Store back the invalidated quick pointer | |
2538 | ||
2539 | ld r4,mpAlias(r4) ; Chain on to the next | |
2540 | li r0,ppLFAmask ; Get mask to clean up mapping pointer | |
2541 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
2542 | b hcmNextMap64 ; Check it out... | |
2543 | hcmNoMap64: | |
2544 | bdnz hcmNextPhys64 | |
2545 | b hcmNextRegion | |
2546 | ||
2547 | ||
2548 | .align 5 | |
2549 | hcmDone: | |
2550 | mtlr r10 ; Restore the return | |
2551 | mtcr r9 ; Restore the condition register | |
2552 | bt++ pf64Bitb,hcmDone64 ; 64-bit version | |
2553 | hcmDone32: | |
2554 | mtmsr r11 ; Restore translation/mode/etc. | |
2555 | isync | |
2556 | blr ; Leave... | |
2557 | ||
2558 | hcmDone64: | |
2559 | mtmsrd r11 ; Restore translation/mode/etc. | |
2560 | isync | |
2561 | blr ; Leave... | |
2562 | ||
2563 | ||
1c79356b A |
2564 | |
2565 | /* | |
91447636 | 2566 | * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod) |
55e303ae A |
2567 | * walks all mappings for a physical page and performs | |
2568 | * specified operations on each. | |
1c79356b | 2569 | * |
55e303ae A |
2570 | * pp is an unlocked physent | |
2571 | * preop is the operation to perform on the physent before the walk. This would be | |
2572 | * used to set a cache attribute or protection | |
2573 | * op is the operation to perform on each mapping during the walk | |
2574 | * postop is the operation to perform on the physent after the walk. This would be | |
2575 | * used to set or reset the RC bits. | |
91447636 A |
2576 | * opmod modifies the action taken on any connected PTEs visited during |
2577 | * the mapping walk. | |
55e303ae A |
2578 | * |
2579 | * We return the RC bits from before postop is run. | |
2580 | * | |
2581 | * Note that this is designed to be called from 32-bit mode with a stack. | |
1c79356b | 2582 | * |
55e303ae A |
2583 | * We disable translation and all interruptions here. This keeps us | |
2584 | * from having to worry about a deadlock due to having anything locked | |
2585 | * and needing it to process a fault. | |
d7e50217 | 2586 | * |
55e303ae A |
2587 | * We lock the physent, execute preop, and then walk each mapping in turn. |
2588 | * If there is a PTE, it is invalidated and the RC merged into the physent. | |
2589 | * Then we call the op function. | |
2590 | * Then we revalidate the PTE. | |
2591 | * Once all mappings are finished, we save the physent RC and call the | |
2592 | * postop routine. Then we unlock the physent and return the RC. | |
2593 | * | |
2594 | * | |
1c79356b A |
2595 | */ |
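;
; Illustratively, the control flow below in C pseudocode. Helper names are
; hypothetical; preop/op/postop are indices into the 128-byte-slot function
; table at hwpOpBase, and a routine signals "continue" by setting CR0_EQ:
;
;     lock_physent(pp);
;     if (preop(pp) != CONTINUE) goto early;
;     for (mp = first_mapping(pp); mp != 0; mp = next_mapping(mp)) {
;         if (opmod < hwpMergePTE) invalidate_pte_and_merge_rc(mp);   // full PTE handling
;         else if (opmod == hwpMergePTE) merge_rc_only(mp);           // merge, no invalidate
;         stop = (op(mp) != CONTINUE);
;         if (opmod < hwpMergePTE) revalidate_pte(mp);
;         if (stop) goto early;
;     }
;     rc = low_word(pp->ppLink);            // RC before postop runs
;     postop(pp);
;     unlock_physent(pp);
;     return rc;
; early:                                    // an early exit still returns RC but skips postop
;     rc = low_word(pp->ppLink);
;     unlock_physent(pp);
;     return rc;
;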
2596 | ||
1c79356b | 2597 | .align 5 |
55e303ae A |
2598 | .globl EXT(hw_walk_phys) |
2599 | ||
2600 | LEXT(hw_walk_phys) | |
91447636 | 2601 | stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack |
55e303ae | 2602 | mflr r0 ; Save the link register |
91447636 A |
2603 | stw r24,FM_ARG0+0x00(r1) ; Save a register |
2604 | stw r25,FM_ARG0+0x04(r1) ; Save a register | |
2605 | stw r26,FM_ARG0+0x08(r1) ; Save a register | |
2606 | stw r27,FM_ARG0+0x0C(r1) ; Save a register | |
2607 | mr r24,r8 ; Save the opmod | |
55e303ae | 2608 | mr r25,r7 ; Save the parm |
91447636 A |
2609 | stw r28,FM_ARG0+0x10(r1) ; Save a register |
2610 | stw r29,FM_ARG0+0x14(r1) ; Save a register | |
2611 | stw r30,FM_ARG0+0x18(r1) ; Save a register | |
2612 | stw r31,FM_ARG0+0x1C(r1) ; Save a register | |
2613 | stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
55e303ae A |
2614 | |
2615 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit | |
91447636 A |
2616 | |
2617 | mfsprg r26,0 ; (INSTRUMENTATION) | |
2618 | lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION) | |
2619 | addi r27,r27,1 ; (INSTRUMENTATION) | |
2620 | stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION) | |
2621 | la r26,hwWalkFull(r26) ; (INSTRUMENTATION) | |
2622 | slwi r12,r24,2 ; (INSTRUMENTATION) | |
2623 | lwzx r27,r26,r12 ; (INSTRUMENTATION) | |
2624 | addi r27,r27,1 ; (INSTRUMENTATION) | |
2625 | stwx r27,r26,r12 ; (INSTRUMENTATION) | |
55e303ae A |
2626 | |
2627 | mr r26,r11 ; Save the old MSR | |
2628 | lis r27,hi16(hwpOpBase) ; Get high order of op base | |
2629 | slwi r4,r4,7 ; Convert preop to displacement | |
2630 | ori r27,r27,lo16(hwpOpBase) ; Get low order of op base | |
2631 | slwi r5,r5,7 ; Convert op to displacement | |
2632 | add r12,r4,r27 ; Point to the preop routine | |
2633 | slwi r28,r6,7 ; Convert postop to displacement | |
2634 | mtctr r12 ; Set preop routine | |
2635 | add r28,r28,r27 ; Get the address of the postop routine | |
2636 | add r27,r5,r27 ; Get the address of the op routine | |
1c79356b | 2637 | |
55e303ae | 2638 | bl mapPhysLock ; Lock the physent |
1c79356b | 2639 | |
55e303ae A |
2640 | mr r29,r3 ; Save the physent address |
2641 | ||
2642 | bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint) | |
2643 | ||
2644 | bctrl ; Call preop routine | |
2645 | bne- hwpEarly32 ; preop says to bail now... | |
91447636 A |
2646 | |
2647 | cmplwi r24,hwpMergePTE ; Classify operation modifier | |
55e303ae A |
2648 | mtctr r27 ; Set up the op function address |
2649 | lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping | |
91447636 A |
2650 | blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping |
2651 | beq hwpMSrc32 ; Do TLB merge for each mapping | |
2652 | ||
3a60a9f5 | 2653 | hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address |
91447636 | 2654 | beq hwpNone32 ; Did not find one... |
55e303ae | 2655 | |
91447636 A |
2656 | bctrl ; Call the op function |
2657 | ||
2658 | bne- hwpEarly32 ; op says to bail now... | |
2659 | lwz r31,mpAlias+4(r31) ; Chain on to the next | |
2660 | b hwpQSrc32 ; Check it out... | |
2661 | ||
2662 | .align 5 | |
3a60a9f5 | 2663 | hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address |
55e303ae | 2664 | beq hwpNone32 ; Did not find one... |
91447636 A |
2665 | |
2666 | bl mapMergeRC32 ; Merge reference and change into mapping and physent | |
2667 | bctrl ; Call the op function | |
2668 | ||
2669 | bne- hwpEarly32 ; op says to bail now... | |
2670 | lwz r31,mpAlias+4(r31) ; Chain on to the next | |
2671 | b hwpMSrc32 ; Check it out... | |
d7e50217 | 2672 | |
91447636 A |
2673 | .align 5 |
2674 | hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address | |
2675 | beq hwpNone32 ; Did not find one... | |
2676 | ||
55e303ae A |
2677 | ; |
2678 | ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4, | |
2679 | ; PTE low in R5. The PCA address is in R7. The PTEG comes back locked. | |
2680 | ; If there is no PTE, PTE low is obtained from the mapping | |
2681 | ; | |
2682 | bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent | |
2683 | ||
2684 | bctrl ; Call the op function | |
2685 | ||
2686 | crmove cr1_eq,cr0_eq ; Save the return code | |
2687 | ||
2688 | mr. r3,r3 ; Was there a previously valid PTE? | |
2689 | beq- hwpNxt32 ; Nope... | |
1c79356b | 2690 | |
55e303ae A |
2691 | stw r5,4(r3) ; Store second half of PTE |
2692 | eieio ; Make sure we do not reorder | |
2693 | stw r4,0(r3) ; Revalidate the PTE | |
2694 | ||
2695 | eieio ; Make sure all updates come first | |
2696 | stw r6,0(r7) ; Unlock the PCA | |
d7e50217 | 2697 | |
55e303ae A |
2698 | hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now... |
2699 | lwz r31,mpAlias+4(r31) ; Chain on to the next | |
2700 | b hwpSrc32 ; Check it out... | |
1c79356b | 2701 | |
55e303ae | 2702 | .align 5 |
1c79356b | 2703 | |
55e303ae | 2704 | hwpNone32: mtctr r28 ; Get the post routine address |
1c79356b | 2705 | |
55e303ae A |
2706 | lwz r30,ppLink+4(r29) ; Save the old RC |
2707 | mr r3,r29 ; Get the physent address | |
2708 | bctrl ; Call post routine | |
1c79356b | 2709 | |
55e303ae A |
2710 | bl mapPhysUnlock ; Unlock the physent |
2711 | ||
2712 | mtmsr r26 ; Restore translation/mode/etc. | |
2713 | isync | |
1c79356b | 2714 | |
55e303ae | 2715 | b hwpReturn ; Go restore registers and return... |
1c79356b | 2716 | |
55e303ae | 2717 | .align 5 |
1c79356b | 2718 | |
55e303ae A |
2719 | hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC |
2720 | mr r3,r29 ; Get the physent address | |
2721 | bl mapPhysUnlock ; Unlock the physent | |
2722 | ||
2723 | mtmsr r26 ; Restore translation/mode/etc. | |
2724 | isync | |
2725 | ||
2726 | b hwpReturn ; Go restore registers and return... | |
1c79356b | 2727 | |
55e303ae | 2728 | .align 5 |
1c79356b | 2729 | |
55e303ae A |
2730 | hwp64: bctrl ; Call preop routine |
2731 | bne-- hwpEarly64 ; preop says to bail now... | |
d7e50217 | 2732 | |
91447636 | 2733 | cmplwi r24,hwpMergePTE ; Classify operation modifier |
55e303ae A |
2734 | mtctr r27 ; Set up the op function address |
2735 | ||
91447636 | 2736 | li r24,ppLFAmask |
55e303ae | 2737 | ld r31,ppLink(r3) ; Get the pointer to the first mapping |
91447636 A |
2738 | rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
2739 | blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping | |
2740 | beq hwpMSrc64 ; Do TLB merge for each mapping | |
55e303ae | 2741 | |
91447636 A |
2742 | hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address |
2743 | beq hwpNone64 ; Did not find one... | |
2744 | ||
2745 | bctrl ; Call the op function | |
2746 | ||
2747 | bne-- hwpEarly64 ; op says to bail now... | |
2748 | ld r31,mpAlias(r31) ; Chain on to the next | |
2749 | b hwpQSrc64 ; Check it out... | |
2750 | ||
2751 | .align 5 | |
2752 | hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address | |
2753 | beq hwpNone64 ; Did not find one... | |
2754 | ||
2755 | bl mapMergeRC64 ; Merge reference and change into mapping and physent | |
2756 | bctrl ; Call the op function | |
2757 | ||
2758 | bne-- hwpEarly64 ; op says to bail now... | |
2759 | ld r31,mpAlias(r31) ; Chain on to the next | |
2760 | b hwpMSrc64 ; Check it out... | |
2761 | ||
2762 | .align 5 | |
2763 | hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address | |
55e303ae A |
2764 | beq hwpNone64 ; Did not find one... |
2765 | ; | |
2766 | ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4, | |
2767 | ; PTE low in R5. PTEG comes back locked if there is one | |
2768 | ; | |
2769 | bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent | |
1c79356b | 2770 | |
55e303ae | 2771 | bctrl ; Call the op function |
1c79356b | 2772 | |
55e303ae | 2773 | crmove cr1_eq,cr0_eq ; Save the return code |
1c79356b | 2774 | |
55e303ae A |
2775 | mr. r3,r3 ; Was there a previously valid PTE? |
2776 | beq-- hwpNxt64 ; Nope... | |
2777 | ||
2778 | std r5,8(r3) ; Save bottom of PTE | |
2779 | eieio ; Make sure we do not reorder | |
2780 | std r4,0(r3) ; Revalidate the PTE | |
d7e50217 | 2781 | |
55e303ae A |
2782 | eieio ; Make sure all updates come first |
2783 | stw r6,0(r7) ; Unlock the PCA | |
2784 | ||
2785 | hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now... | |
2786 | ld r31,mpAlias(r31) ; Chain on to the next | |
55e303ae | 2787 | b hwpSrc64 ; Check it out... |
1c79356b | 2788 | |
55e303ae A |
2789 | .align 5 |
2790 | ||
2791 | hwpNone64: mtctr r28 ; Get the post routine address | |
2792 | ||
2793 | lwz r30,ppLink+4(r29) ; Save the old RC | |
2794 | mr r3,r29 ; Get the physent address | |
2795 | bctrl ; Call post routine | |
2796 | ||
2797 | bl mapPhysUnlock ; Unlock the physent | |
2798 | ||
2799 | mtmsrd r26 ; Restore translation/mode/etc. | |
1c79356b | 2800 | isync |
55e303ae A |
2801 | b hwpReturn ; Go restore registers and return... |
2802 | ||
2803 | .align 5 | |
1c79356b | 2804 | |
55e303ae A |
2805 | hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC |
2806 | mr r3,r29 ; Get the physent address | |
2807 | bl mapPhysUnlock ; Unlock the physent | |
2808 | ||
2809 | mtmsrd r26 ; Restore translation/mode/etc. | |
2810 | isync | |
2811 | ||
91447636 A |
2812 | hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return |
2813 | lwz r24,FM_ARG0+0x00(r1) ; Restore a register | |
2814 | lwz r25,FM_ARG0+0x04(r1) ; Restore a register | |
2815 | lwz r26,FM_ARG0+0x08(r1) ; Restore a register | |
55e303ae | 2816 | mr r3,r30 ; Pass back the RC |
91447636 A |
2817 | lwz r27,FM_ARG0+0x0C(r1) ; Restore a register |
2818 | lwz r28,FM_ARG0+0x10(r1) ; Restore a register | |
55e303ae | 2819 | mtlr r0 ; Restore the return |
91447636 A |
2820 | lwz r29,FM_ARG0+0x14(r1) ; Restore a register |
2821 | lwz r30,FM_ARG0+0x18(r1) ; Restore a register | |
2822 | lwz r31,FM_ARG0+0x1C(r1) ; Restore a register | |
55e303ae A |
2823 | lwz r1,0(r1) ; Pop the stack |
2824 | blr ; Leave... | |
d7e50217 | 2825 | |
d7e50217 | 2826 | |
55e303ae A |
2827 | ; |
2828 | ; The preop/op/postop function table. | |
2829 | ; Each function gets a 128-byte slot and must be no more than | |
2830 | ; 32 instructions. If more than 32, we must fix the address calculations | |
2831 | ; at the start of hwpOpBase | |
2832 | ; | |
2833 | ; The routine must set CR0_EQ in order to continue scan. | |
2834 | ; If CR0_EQ is not set, an early return from the function is made. | |
2835 | ; | |
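;
; The dispatch above computes each routine's address as
; hwpOpBase + (index << 7), one 128-byte slot per function; in C terms,
; roughly:
;
;     routine = (void (*)(void))((char *)hwpOpBase + (index << 7));
;
; The ".set .,hwpOpBase+(n*128)" directives after each routine turn a slot
; overflow into an assembly-time error.
;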
d7e50217 | 2836 | |
55e303ae A |
2837 | .align 7 |
2838 | ||
2839 | hwpOpBase: | |
2840 | ||
2841 | ; Function 0 - No operation | |
2842 | ||
2843 | hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set | |
2844 | blr ; Just return... | |
1c79356b A |
2845 | |
2846 | .align 5 | |
1c79356b | 2847 | |
55e303ae | 2848 | ; This is the continuation of function 4 - Set attributes in mapping |
1c79356b | 2849 | |
55e303ae A |
2850 | ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes. |
2851 | ; NOTE: Do we have to deal with i-cache here? | |
2852 | ||
91447636 | 2853 | hwpSAM: li r11,4096 ; Get page size |
d7e50217 | 2854 | |
55e303ae A |
2855 | hwpSAMinvd: sub. r11,r11,r9 ; Back off a line |
2856 | dcbf r11,r5 ; Flush the line in the data cache | |
2857 | bgt++ hwpSAMinvd ; Go do the rest of it... | |
2858 | ||
2859 | sync ; Make sure it is done | |
1c79356b | 2860 | |
91447636 | 2861 | li r11,4096 ; Get page size |
55e303ae A |
2862 | |
2863 | hwpSAMinvi: sub. r11,r11,r9 ; Back off a line | |
2864 | icbi r11,r5 ; Flush the line in the icache | |
2865 | bgt++ hwpSAMinvi ; Go do the rest of it... | |
2866 | ||
2867 | sync ; Make sure it is done | |
1c79356b | 2868 | |
55e303ae A |
2869 | cmpw r0,r0 ; Make sure we return CR0_EQ |
2870 | blr ; Return... | |
1c79356b | 2871 | |
1c79356b | 2872 | |
91447636 | 2873 | ; Function 1 - Set protection in physent (obsolete) |
1c79356b | 2874 | |
55e303ae A |
2875 | .set .,hwpOpBase+(1*128) ; Generate error if previous function too long |
2876 | ||
91447636 | 2877 | hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ |
55e303ae | 2878 | blr ; Return... |
1c79356b | 2879 | |
1c79356b | 2880 | |
55e303ae | 2881 | ; Function 2 - Set protection in mapping |
1c79356b | 2882 | |
0c530ab8 A |
2883 | ; NOTE: Changes to no-execute permission are ignored |
2884 | ||
55e303ae | 2885 | .set .,hwpOpBase+(2*128) ; Generate error if previous function too long |
1c79356b | 2886 | |
55e303ae A |
2887 | hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags |
2888 | lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping | |
2889 | rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent? | |
0c530ab8 | 2890 | li r0,lo16(mpPP) ; Get protection bits |
55e303ae | 2891 | crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent |
0c530ab8 | 2892 | rlwinm r2,r25,0,mpPP ; Isolate new protection bits |
55e303ae | 2893 | beqlr-- ; Leave if permanent mapping (before we trash R5)... |
0c530ab8 A |
2894 | andc r5,r5,r0 ; Clear the old prot bits |
2895 | or r5,r5,r2 ; Move in the new prot bits | |
55e303ae A |
2896 | rlwimi r8,r5,0,20,31 ; Copy into the mapping copy |
2897 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
2898 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2899 | blr ; Leave... | |
2900 | ||
2901 | ; Function 3 - Set attributes in physent | |
1c79356b | 2902 | |
55e303ae | 2903 | .set .,hwpOpBase+(3*128) ; Generate error if previous function too long |
1c79356b | 2904 | |
91447636 | 2905 | hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent |
1c79356b | 2906 | |
55e303ae | 2907 | hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags |
91447636 | 2908 | rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes |
55e303ae A |
2909 | stwcx. r4,r5,r29 ; Try to stuff it |
2910 | bne-- hwpSAtrPhX ; Try again... | |
2911 | ; Note: CR0_EQ is set because of stwcx. | |
2912 | blr ; Return... | |
de355530 | 2913 | |
55e303ae | 2914 | ; Function 4 - Set attributes in mapping |
d7e50217 | 2915 | |
55e303ae A |
2916 | .set .,hwpOpBase+(4*128) ; Generate error if previous function too long |
2917 | ||
2918 | hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags | |
2919 | lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping | |
91447636 | 2920 | li r2,mpM ; Force on coherent |
55e303ae A |
2921 | rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent? |
2922 | li r0,lo16(mpWIMG) ; Get wimg mask | |
2923 | crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent | |
91447636 A |
2924 | rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32 |
2925 | ; Copy in the cache inhibited bit | |
55e303ae A |
2926 | beqlr-- ; Leave if permanent mapping (before we trash R5)... |
2927 | andc r5,r5,r0 ; Clear the old wimg | |
91447636 A |
2928 | rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32 |
2929 | ; Copy in the guarded bit | |
55e303ae A |
2930 | mfsprg r9,2 ; Feature flags |
2931 | or r5,r5,r2 ; Move in the new wimg | |
2932 | rlwimi r8,r5,0,20,31 ; Copy into the mapping copy | |
2933 | lwz r2,mpPAddr(r31) ; Get the physical address | |
2934 | li r0,0xFFF ; Start a mask | |
2935 | andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size | |
2936 | rlwinm r5,r0,0,1,0 ; Copy to top half | |
2937 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2938 | rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left | |
2939 | and r5,r5,r2 ; Clean stuff in top 32 bits | |
2940 | andc r2,r2,r0 ; Clean bottom too | |
2941 | rlwimi r5,r2,0,0,31 ; Insert low 23 to make full physical address | |
2942 | b hwpSAM ; Join common | |
1c79356b | 2943 | |
55e303ae A |
2944 | ; NOTE: we moved the remainder of the code out of here because it |
2945 | ; did not fit in the 128 bytes allotted. It got stuck into the free space | |
2946 | ; at the end of the no-op function. | |
2947 | ||
2948 | ||
2949 | ||
de355530 | 2950 | |
55e303ae | 2951 | ; Function 5 - Clear reference in physent |
1c79356b | 2952 | |
55e303ae | 2953 | .set .,hwpOpBase+(5*128) ; Generate error if previous function too long |
1c79356b | 2954 | |
55e303ae | 2955 | hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
1c79356b | 2956 | |
55e303ae | 2957 | hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags |
91447636 | 2958 | rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R |
55e303ae A |
2959 | stwcx. r4,r5,r29 ; Try to stuff it |
2960 | bne-- hwpCRefPhX ; Try again... | |
2961 | ; Note: CR0_EQ is set because of stwcx. | |
2962 | blr ; Return... | |
1c79356b A |
2963 | |
2964 | ||
55e303ae | 2965 | ; Function 6 - Clear reference in mapping |
1c79356b | 2966 | |
55e303ae | 2967 | .set .,hwpOpBase+(6*128) ; Generate error if previous function too long |
1c79356b | 2968 | |
55e303ae A |
2969 | hwpCRefMap: li r0,lo16(mpR) ; Get reference bit |
2970 | lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
2971 | andc r5,r5,r0 ; Clear in PTE copy | |
2972 | andc r8,r8,r0 ; and in the mapping | |
2973 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
2974 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2975 | blr ; Return... | |
1c79356b | 2976 | |
de355530 | 2977 | |
55e303ae | 2978 | ; Function 7 - Clear change in physent |
1c79356b | 2979 | |
55e303ae | 2980 | .set .,hwpOpBase+(7*128) ; Generate error if previous function too long |
1c79356b | 2981 | |
55e303ae | 2982 | hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
1c79356b | 2983 | |
55e303ae A |
2984 | hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags |
2985 | rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C | |
2986 | stwcx. r4,r5,r29 ; Try to stuff it | |
2987 | bne-- hwpCCngPhX ; Try again... | |
2988 | ; Note: CR0_EQ is set because of stwcx. | |
2989 | blr ; Return... | |
1c79356b | 2990 | |
de355530 | 2991 | |
55e303ae | 2992 | ; Function 8 - Clear change in mapping |
1c79356b | 2993 | |
55e303ae A |
2994 | .set .,hwpOpBase+(8*128) ; Generate error if previous function too long |
2995 | ||
2996 | hwpCCngMap: li r0,lo16(mpC) ; Get change bit | |
2997 | lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
2998 | andc r5,r5,r0 ; Clear in PTE copy | |
2999 | andc r8,r8,r0 ; and in the mapping | |
3000 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3001 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3002 | blr ; Return... | |
d7e50217 | 3003 | |
de355530 | 3004 | |
55e303ae | 3005 | ; Function 9 - Set reference in physent |
d7e50217 | 3006 | |
55e303ae | 3007 | .set .,hwpOpBase+(9*128) ; Generate error if previous function too long |
d7e50217 | 3008 | |
55e303ae A |
3009 | hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
3010 | ||
3011 | hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags | |
3012 | ori r4,r4,lo16(ppR) ; Set the reference | |
3013 | stwcx. r4,r5,r29 ; Try to stuff it | |
3014 | bne-- hwpSRefPhX ; Try again... | |
3015 | ; Note: CR0_EQ is set because of stwcx. | |
3016 | blr ; Return... | |
d7e50217 | 3017 | |
1c79356b | 3018 | |
55e303ae | 3019 | ; Function 10 - Set reference in mapping |
d7e50217 | 3020 | |
55e303ae A |
3021 | .set .,hwpOpBase+(10*128) ; Generate error if previous function too long |
3022 | ||
3023 | hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
55e303ae A |
3024 | ori r8,r8,lo16(mpR) ; Set reference in mapping |
3025 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3026 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3027 | blr ; Return... | |
3028 | ||
3029 | ; Function 11 - Set change in physent | |
1c79356b | 3030 | |
55e303ae | 3031 | .set .,hwpOpBase+(11*128) ; Generate error if previous function too long |
1c79356b | 3032 | |
55e303ae | 3033 | hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
1c79356b | 3034 | |
55e303ae A |
3035 | hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags |
3036 | ori r4,r4,lo16(ppC) ; Set the change bit | |
3037 | stwcx. r4,r5,r29 ; Try to stuff it | |
3038 | bne-- hwpSCngPhX ; Try again... | |
3039 | ; Note: CR0_EQ is set because of stwcx. | |
3040 | blr ; Return... | |
de355530 | 3041 | |
55e303ae | 3042 | ; Function 12 - Set change in mapping |
1c79356b | 3043 | |
55e303ae | 3044 | .set .,hwpOpBase+(12*128) ; Generate error if previous function too long |
1c79356b | 3045 | |
55e303ae | 3046 | hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping |
55e303ae A |
3047 | ori r8,r8,lo16(mpC) ; Set change in mapping | |
3048 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3049 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3050 | blr ; Return... | |
1c79356b | 3051 | |
55e303ae | 3052 | ; Function 13 - Test reference in physent |
1c79356b | 3053 | |
55e303ae A |
3054 | .set .,hwpOpBase+(13*128) ; Generate error if previous function too long |
3055 | ||
3056 | hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent | |
3057 | rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0 | |
3058 | blr ; Return (CR0_EQ set to continue if reference is off)... | |
1c79356b | 3059 | |
1c79356b | 3060 | |
55e303ae | 3061 | ; Function 14 - Test reference in mapping |
1c79356b | 3062 | |
55e303ae | 3063 | .set .,hwpOpBase+(14*128) ; Generate error if previous function too long |
de355530 | 3064 | |
55e303ae A |
3065 | hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0 |
3066 | blr ; Return (CR0_EQ set to continue if reference is off)... | |
3067 | ||
91447636 | 3068 | |
55e303ae | 3069 | ; Function 15 - Test change in physent |
1c79356b | 3070 | |
55e303ae | 3071 | .set .,hwpOpBase+(15*128) ; Generate error if previous function too long |
1c79356b | 3072 | |
55e303ae A |
3073 | hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent |
3074 | rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0 | |
91447636 | 3075 | blr ; Return (CR0_EQ set to continue if change is off)... |
55e303ae A |
3076 | |
3077 | ||
3078 | ; Function 16 - Test change in mapping | |
3079 | ||
3080 | .set .,hwpOpBase+(16*128) ; Generate error if previous function too long | |
d7e50217 | 3081 | |
55e303ae | 3082 | hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0 |
91447636 A |
3083 | blr ; Return (CR0_EQ set to continue if change is off)... |
3084 | ||
3085 | ||
3086 | ; Function 17 - Test reference and change in physent | |
55e303ae A |
3087 | |
3088 | .set .,hwpOpBase+(17*128) ; Generate error if previous function too long | |
3089 | ||
91447636 A |
3090 | hwpTRefCngPhy: |
3091 | lwz r0,ppLink+4(r29) ; Get the flags from physent | |
3092 | rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits | |
3093 | cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1)) | |
3094 | crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0)) | |
3095 | blr ; Return (CR0_EQ set to continue if either R or C is off)... | |
3096 | ||
3097 | ||
3098 | ; Function 18 - Test reference and change in mapping | |
3099 | ||
3100 | .set .,hwpOpBase+(18*128) ; Generate error if previous function too long | |
3101 | hwpTRefCngMap: | |
3102 | rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping | |
3103 | cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1)) | |
3104 | crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0)) | |
3105 | blr ; Return (CR0_EQ set to continue if either R or C is off)... | |
3106 | ||
3107 | ||
3108 | ; Function 19 - Clear reference and change in physent | |
3109 | ||
3110 | .set .,hwpOpBase+(19*128) ; Generate error if previous function too long | |
3111 | hwpCRefCngPhy: | |
3112 | li r5,ppLink+4 ; Get offset for flag part of physent | |
3113 | ||
3114 | hwpCRefCngPhX: | |
3115 | lwarx r4,r5,r29 ; Get the old flags | |
3116 | andc r4,r4,r25 ; Clear R and C as specified by mask | |
3117 | stwcx. r4,r5,r29 ; Try to stuff it | |
3118 | bne-- hwpCRefCngPhX ; Try again... | |
3119 | ; Note: CR0_EQ is set because of stwcx. | |
3120 | blr ; Return... | |
3121 | ||
3122 | ||
3123 | ; Function 20 - Clear reference and change in mapping | |
3124 | ||
3125 | .set .,hwpOpBase+(20*128) ; Generate error if previous function too long | |
3126 | hwpCRefCngMap: | |
3127 | srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map) | |
3128 | lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
3129 | andc r5,r5,r0 ; Clear in PTE copy | |
3130 | andc r8,r8,r0 ; and in the mapping | |
3131 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3132 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3133 | blr ; Return... | |
3134 | ||
d7e50217 | 3135 | |
91447636 | 3136 | .set .,hwpOpBase+(21*128) ; Generate error if previous function too long |
d7e50217 | 3137 | |
de355530 | 3138 | ; |
91447636 | 3139 | ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping. |
55e303ae A |
3140 | ; |
3141 | ; Returns: | |
3142 | ; mapRtOK - if all is ok | |
3143 | ; mapRtBadLk - if mapping lock fails | |
3144 | ; mapRtPerm - if mapping is permanent | |
3145 | ; mapRtNotFnd - if mapping is not found | |
3146 | ; mapRtBlock - if mapping is a block | |
de355530 | 3147 | ; |
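;
; A hypothetical caller, for illustration only (assuming the usual
; pmap_t/addr64_t types):
;
;     addr64_t nextva;
;
;     switch (hw_protect(pmap, va, prot, &nextva)) {
;         case mapRtOK:       break;      // protection updated
;         case mapRtBadLk:    ...         // pmap search lock timed out
;         case mapRtPerm:     ...         // permanent mapping, left unchanged
;         case mapRtNotFnd:   ...         // nothing mapped at va
;         case mapRtBlock:    ...         // block mapping, left unchanged
;     }
;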
55e303ae A |
3148 | .align 5 |
3149 | .globl EXT(hw_protect) | |
d7e50217 | 3150 | |
55e303ae A |
3151 | LEXT(hw_protect) |
3152 | stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack | |
3153 | mflr r0 ; Save the link register | |
3154 | stw r24,FM_ARG0+0x00(r1) ; Save a register | |
3155 | stw r25,FM_ARG0+0x04(r1) ; Save a register | |
3156 | mr r25,r7 ; Remember address of next va | |
3157 | stw r26,FM_ARG0+0x08(r1) ; Save a register | |
3158 | stw r27,FM_ARG0+0x0C(r1) ; Save a register | |
3159 | stw r28,FM_ARG0+0x10(r1) ; Save a register | |
3160 | mr r24,r6 ; Save the new protection flags | |
3161 | stw r29,FM_ARG0+0x14(r1) ; Save a register | |
3162 | stw r30,FM_ARG0+0x18(r1) ; Save a register | |
3163 | stw r31,FM_ARG0+0x1C(r1) ; Save a register | |
3164 | stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
1c79356b | 3165 | |
91447636 A |
3166 | #if DEBUG |
3167 | lwz r11,pmapFlags(r3) ; Get pmaps flags | |
3168 | rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active? | |
3169 | bne hpPanic ; Call not valid for guest shadow assist pmap | |
3170 | #endif | |
3171 | ||
55e303ae A |
3172 | lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap |
3173 | lwz r7,pmapvr+4(r3) ; Get the second part | |
d7e50217 | 3174 | |
d7e50217 | 3175 | |
55e303ae | 3176 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit |
9bccf70c | 3177 | |
55e303ae A |
3178 | mr r27,r11 ; Remember the old MSR |
3179 | mr r26,r12 ; Remember the feature bits | |
9bccf70c | 3180 | |
55e303ae | 3181 | xor r28,r3,r7 ; Change the common 32- and 64-bit half |
9bccf70c | 3182 | |
55e303ae A |
3183 | bf-- pf64Bitb,hpSF1 ; skip if 32-bit... |
3184 | ||
3185 | rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top | |
9bccf70c | 3186 | |
55e303ae A |
3187 | hpSF1: mr r29,r4 ; Save top half of vaddr |
3188 | mr r30,r5 ; Save the bottom half | |
3189 | ||
3190 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3191 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
3192 | mr. r3,r3 ; Did we get the lock? | |
3193 | bne-- hpBadLock ; Nope... | |
d7e50217 | 3194 | |
55e303ae A |
3195 | mr r3,r28 ; get the pmap address |
3196 | mr r4,r29 ; Get bits 0:31 to look for | |
3197 | mr r5,r30 ; Get bits 32:64 | |
de355530 | 3198 | |
55e303ae | 3199 | bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags) |
d7e50217 | 3200 | |
91447636 A |
3201 | rlwinm. r0,r7,0,mpType ; Is this a normal mapping? |
3202 | crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping | |
3203 | andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed? | |
3204 | crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed | |
55e303ae | 3205 | mr. r31,r3 ; Save the mapping if we found it |
55e303ae A |
3206 | mr r29,r4 ; Save next va high half |
3207 | mr r30,r5 ; Save next va low half | |
d7e50217 | 3208 | |
55e303ae | 3209 | beq-- hpNotFound ; Not found... |
de355530 | 3210 | |
91447636 | 3211 | bf-- cr1_eq,hpNotAllowed ; Something special is happening... |
d7e50217 | 3212 | |
55e303ae A |
3213 | bt++ pf64Bitb,hpDo64 ; Split for 64 bit |
3214 | ||
3215 | bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent | |
3216 | ||
91447636 | 3217 | rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit) |
55e303ae A |
3218 | mr. r3,r3 ; Was there a previously valid PTE? |
3219 | ||
3220 | stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest) | |
3221 | ||
3222 | beq-- hpNoOld32 ; Nope... | |
1c79356b | 3223 | |
55e303ae A |
3224 | stw r5,4(r3) ; Store second half of PTE |
3225 | eieio ; Make sure we do not reorder | |
3226 | stw r4,0(r3) ; Revalidate the PTE | |
3227 | ||
3228 | eieio ; Make sure all updates come first | |
3229 | stw r6,0(r7) ; Unlock PCA | |
3230 | ||
3231 | hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3232 | bl sxlkUnlock ; Unlock the search list | |
de355530 | 3233 | |
55e303ae A |
3234 | li r3,mapRtOK ; Set normal return |
3235 | b hpR32 ; Join common... | |
3236 | ||
3237 | .align 5 | |
1c79356b | 3238 | |
d7e50217 | 3239 | |
55e303ae A |
3240 | hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent |
3241 | ||
91447636 | 3242 | rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits | |
55e303ae A |
3243 | mr. r3,r3 ; Was there a previously valid PTE? |
3244 | ||
3245 | stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest) | |
3246 | ||
3247 | beq-- hpNoOld64 ; Nope... | |
d7e50217 | 3248 | |
55e303ae A |
3249 | std r5,8(r3) ; Store second half of PTE |
3250 | eieio ; Make sure we do not reorder | |
3251 | std r4,0(r3) ; Revalidate the PTE | |
de355530 | 3252 | |
55e303ae A |
3253 | eieio ; Make sure all updates come first |
3254 | stw r6,0(r7) ; Unlock PCA | |
de355530 | 3255 | |
55e303ae A |
3256 | hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3257 | bl sxlkUnlock ; Unlock the search list | |
de355530 | 3258 | |
55e303ae A |
3259 | li r3,mapRtOK ; Set normal return |
3260 | b hpR64 ; Join common... | |
de355530 | 3261 | |
55e303ae A |
3262 | .align 5 |
3263 | ||
3264 | hpReturn: bt++ pf64Bitb,hpR64 ; Yes... | |
3265 | ||
3266 | hpR32: mtmsr r27 ; Restore enables/translation/etc. | |
3267 | isync | |
3268 | b hpReturnC ; Join common... | |
3269 | ||
3270 | hpR64: mtmsrd r27 ; Restore enables/translation/etc. | |
3271 | isync | |
3272 | ||
3273 | hpReturnC: stw r29,0(r25) ; Save the top of the next va | |
3274 | stw r30,4(r25) ; Save the bottom of the next va | |
3275 | lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return | |
3276 | lwz r24,FM_ARG0+0x00(r1) ; Restore a register | |
3277 | lwz r25,FM_ARG0+0x04(r1) ; Restore a register | |
3278 | lwz r26,FM_ARG0+0x08(r1) ; Restore a register | |
3279 | mtlr r0 ; Restore the return | |
3280 | lwz r27,FM_ARG0+0x0C(r1) ; Restore a register | |
3281 | lwz r28,FM_ARG0+0x10(r1) ; Restore a register | |
3282 | lwz r29,FM_ARG0+0x14(r1) ; Restore a register | |
3283 | lwz r30,FM_ARG0+0x18(r1) ; Restore a register | |
3284 | lwz r31,FM_ARG0+0x1C(r1) ; Restore a register | |
3285 | lwz r1,0(r1) ; Pop the stack | |
3286 | blr ; Leave... | |
3287 | ||
3288 | .align 5 | |
3289 | ||
3290 | hpBadLock: li r3,mapRtBadLk ; Set lock time out error code | |
3291 | b hpReturn ; Leave.... | |
d7e50217 | 3292 | |
55e303ae A |
3293 | hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3294 | bl sxlkUnlock ; Unlock the search list | |
d7e50217 | 3295 | |
55e303ae A |
3296 | li r3,mapRtNotFnd ; Set that we did not find the requested page |
3297 | b hpReturn ; Leave.... | |
3298 | ||
3299 | hpNotAllowed: | |
3300 | rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed? | |
3301 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3302 | bne-- hpNotFound ; Yeah... | |
3303 | bl sxlkUnlock ; Unlock the search list | |
3304 | ||
3305 | li r3,mapRtBlock ; Assume it was a block | |
91447636 A |
3306 | rlwinm r0,r7,0,mpType ; Isolate mapping type |
3307 | cmplwi r0,mpBlock ; Is this a block mapping? | |
3308 | beq++ hpReturn ; Yes, leave... | |
55e303ae A |
3309 | |
3310 | li r3,mapRtPerm ; Set that we hit a permanent page | |
3311 | b hpReturn ; Leave.... | |
9bccf70c | 3312 | |
91447636 A |
3313 | hpPanic: lis r0,hi16(Choke) ; System abend |
3314 | ori r0,r0,lo16(Choke) ; System abend | |
3315 | li r3,failMapping ; Show that we failed some kind of mapping thing | |
3316 | sc | |
3317 | ||
9bccf70c | 3318 | |
55e303ae A |
3319 | ; |
3320 | ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va | |
3321 | ; | |
3322 | ; Returns one of the following codes ORed with the RC bits from the mapping | |
3323 | ; mapRtOK - if all is ok | |
3324 | ; mapRtBadLk - if mapping lock fails | |
3325 | ; mapRtNotFnd - if mapping is not found | |
3326 | ; | |
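;
; Since the status code comes back ORed with the mapping's mpR/mpC bits
; (see the "or r3,r3,r25" at htrReturnC), a caller separates the two;
; an illustrative sketch only -- the masking idiom is an assumption:
;
;     unsigned int ret = hw_test_rc(pmap, va, reset);   // nonzero reset clears RC
;
;     if ((ret & ~(mpR | mpC)) == mapRtOK) {
;         boolean_t referenced = (ret & mpR) != 0;
;         boolean_t changed    = (ret & mpC) != 0;
;     }
;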
3327 | .align 5 | |
3328 | .globl EXT(hw_test_rc) | |
9bccf70c | 3329 | |
55e303ae A |
3330 | LEXT(hw_test_rc) |
3331 | stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack | |
3332 | mflr r0 ; Save the link register | |
3333 | stw r24,FM_ARG0+0x00(r1) ; Save a register | |
3334 | stw r25,FM_ARG0+0x04(r1) ; Save a register | |
3335 | stw r26,FM_ARG0+0x08(r1) ; Save a register | |
3336 | stw r27,FM_ARG0+0x0C(r1) ; Save a register | |
3337 | stw r28,FM_ARG0+0x10(r1) ; Save a register | |
3338 | mr r24,r6 ; Save the reset request | |
3339 | stw r29,FM_ARG0+0x14(r1) ; Save a register | |
3340 | stw r30,FM_ARG0+0x18(r1) ; Save a register | |
3341 | stw r31,FM_ARG0+0x1C(r1) ; Save a register | |
3342 | stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
9bccf70c | 3343 | |
91447636 A |
3344 | #if DEBUG |
3345 | lwz r11,pmapFlags(r3) ; Get pmaps flags | |
3346 | rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active? | |
3347 | bne htrPanic ; Call not valid for guest shadow assist pmap | |
3348 | #endif | |
3349 | ||
55e303ae A |
3350 | lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap |
3351 | lwz r7,pmapvr+4(r3) ; Get the second part | |
0b4e3aa0 | 3352 | |
9bccf70c | 3353 | |
55e303ae | 3354 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit |
9bccf70c | 3355 | |
55e303ae A |
3356 | mr r27,r11 ; Remember the old MSR |
3357 | mr r26,r12 ; Remember the feature bits | |
9bccf70c | 3358 | |
55e303ae | 3359 | xor r28,r3,r7 ; Change the common 32- and 64-bit half |
9bccf70c | 3360 | |
55e303ae | 3361 | bf-- pf64Bitb,htrSF1 ; skip if 32-bit... |
1c79356b | 3362 | |
55e303ae | 3363 | rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top |
1c79356b | 3364 | |
55e303ae A |
3365 | htrSF1: mr r29,r4 ; Save top half of vaddr |
3366 | mr r30,r5 ; Save the bottom half | |
1c79356b | 3367 | |
55e303ae A |
3368 | la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3369 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
3370 | mr. r3,r3 ; Did we get the lock? | |
3371 | li r25,0 ; Clear RC | |
3372 | bne-- htrBadLock ; Nope... | |
3373 | ||
3374 | mr r3,r28 ; get the pmap address | |
3375 | mr r4,r29 ; Get bits 0:31 to look for | |
3376 | mr r5,r30 ; Get bits 32:64 | |
d7e50217 | 3377 | |
55e303ae | 3378 | bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags) |
9bccf70c | 3379 | |
91447636 A |
3380 | rlwinm. r0,r7,0,mpType ; Is this a normal mapping? |
3381 | crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping | |
3382 | andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed? | |
3383 | crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed | |
55e303ae | 3384 | mr. r31,r3 ; Save the mapping if we found it |
91447636 | 3385 | crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed |
d7e50217 | 3386 | |
91447636 | 3387 | bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed... |
1c79356b | 3388 | |
55e303ae A |
3389 | bt++ pf64Bitb,htrDo64 ; Split for 64 bit |
3390 | ||
3391 | bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent | |
3392 | ||
3393 | cmplwi cr1,r24,0 ; Do we want to clear RC? | |
3394 | lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field | |
3395 | mr. r3,r3 ; Was there a previously valid PTE? | |
3396 | li r0,lo16(mpR|mpC) ; Get bits to clear | |
9bccf70c | 3397 | |
55e303ae A |
3398 | and r25,r5,r0 ; Save the RC bits |
3399 | beq++ cr1,htrNoClr32 ; Nope... | |
3400 | ||
3401 | andc r12,r12,r0 ; Clear mapping copy of RC | |
3402 | andc r5,r5,r0 ; Clear PTE copy of RC | |
3403 | sth r12,mpVAddr+6(r31) ; Set the new RC | |
9bccf70c | 3404 | |
55e303ae | 3405 | htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE... |
d7e50217 | 3406 | |
55e303ae A |
3407 | sth r5,6(r3) ; Store updated RC |
3408 | eieio ; Make sure we do not reorder | |
3409 | stw r4,0(r3) ; Revalidate the PTE | |
9bccf70c | 3410 | |
55e303ae A |
3411 | eieio ; Make sure all updates come first |
3412 | stw r6,0(r7) ; Unlock PCA | |
1c79356b | 3413 | |
55e303ae A |
3414 | htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3415 | bl sxlkUnlock ; Unlock the search list | |
3416 | li r3,mapRtOK ; Set normal return | |
3417 | b htrR32 ; Join common... | |
1c79356b | 3418 | |
55e303ae A |
3419 | .align 5 |
3420 | ||
3421 | ||
3422 | htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent | |
3423 | ||
3424 | cmplwi cr1,r24,0 ; Do we want to clear RC? | |
3425 | lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field | |
3426 | mr. r3,r3 ; Was there a previously valid PTE? | |
3427 | li r0,lo16(mpR|mpC) ; Get bits to clear | |
1c79356b | 3428 | |
55e303ae A |
3429 | and r25,r5,r0 ; Save the RC bits |
3430 | beq++ cr1,htrNoClr64 ; Nope... | |
3431 | ||
3432 | andc r12,r12,r0 ; Clear mapping copy of RC | |
3433 | andc r5,r5,r0 ; Clear PTE copy of RC | |
3434 | sth r12,mpVAddr+6(r31) ; Set the new RC | |
1c79356b | 3435 | |
55e303ae A |
3436 | htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE... | |
3437 | ||
3438 | sth r5,14(r3) ; Store updated RC | |
3439 | eieio ; Make sure we do not reorder | |
3440 | std r4,0(r3) ; Revalidate the PTE | |
1c79356b | 3441 | |
55e303ae A |
3442 | eieio ; Make sure all updates come first |
3443 | stw r6,0(r7) ; Unlock PCA | |
1c79356b | 3444 | |
55e303ae A |
3445 | htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3446 | bl sxlkUnlock ; Unlock the search list | |
3447 | li r3,mapRtOK ; Set normal return | |
3448 | b htrR64 ; Join common... | |
de355530 | 3449 | |
55e303ae A |
3450 | .align 5 |
3451 | ||
3452 | htrReturn: bt++ pf64Bitb,htrR64 ; Yes... | |
de355530 | 3453 | |
55e303ae A |
3454 | htrR32: mtmsr r27 ; Restore enables/translation/etc. |
3455 | isync | |
3456 | b htrReturnC ; Join common... | |
de355530 | 3457 | |
55e303ae A |
3458 | htrR64: mtmsrd r27 ; Restore enables/translation/etc. |
3459 | isync | |
1c79356b | 3460 | |
55e303ae A |
3461 | htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return | |
3462 | or r3,r3,r25 ; Send the RC bits back | |
3463 | lwz r24,FM_ARG0+0x00(r1) ; Restore a register | |
3464 | lwz r25,FM_ARG0+0x04(r1) ; Restore a register | |
3465 | lwz r26,FM_ARG0+0x08(r1) ; Restore a register | |
3466 | mtlr r0 ; Restore the return | |
3467 | lwz r27,FM_ARG0+0x0C(r1) ; Restore a register | |
3468 | lwz r28,FM_ARG0+0x10(r1) ; Restore a register | |
3469 | lwz r29,FM_ARG0+0x14(r1) ; Restore a register | |
3470 | lwz r30,FM_ARG0+0x18(r1) ; Restore a register | |
3471 | lwz r31,FM_ARG0+0x1C(r1) ; Restore a register | |
3472 | lwz r1,0(r1) ; Pop the stack | |
1c79356b A |
3473 | blr ; Leave... |
3474 | ||
3475 | .align 5 | |
3476 | ||
55e303ae A |
3477 | htrBadLock: li r3,mapRtBadLk ; Set lock time out error code |
3478 | b htrReturn ; Leave.... | |
1c79356b | 3479 | |
55e303ae A |
3480 | htrNotFound: |
3481 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3482 | bl sxlkUnlock ; Unlock the search list | |
1c79356b | 3483 | |
55e303ae A |
3484 | li r3,mapRtNotFnd ; Set that we did not find the requested page |
3485 | b htrReturn ; Leave.... | |
3486 | ||
91447636 A |
3487 | htrPanic: lis r0,hi16(Choke) ; System abend |
3488 | ori r0,r0,lo16(Choke) ; System abend | |
3489 | li r3,failMapping ; Show that we failed some kind of mapping thing | |
3490 | sc | |
3491 | ||
3492 | ||
3493 | ; | |
3494 | ; | |
3495 | ; mapFindLockPN - find and lock physent for a given page number | |
3496 | ; | |
3497 | ; | |
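;
; The scan below, illustratively in C; field names mirror the assembly
; offsets, and physents are 8 bytes each (hence the shift left 3):
;
;     for (rgn = pmap_mem_regions; rgn->mrPhysTab != 0; rgn++) {
;         if (pnum >= rgn->mrStart && pnum <= rgn->mrEnd)
;             return mapPhysLock(&rgn->mrPhysTab[pnum - rgn->mrStart]);
;     }
;     return 0;                          // page is in no bank
;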
3498 | .align 5 | |
3499 | mapFindLockPN: | |
3500 | lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
3501 | mr r2,r3 ; Save our target | |
3502 | ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
3503 | ||
3504 | mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address | |
3505 | lwz r5,mrStart(r9) ; Get start of table entry | |
3506 | lwz r0,mrEnd(r9) ; Get end of table entry | |
3507 | addi r9,r9,mrSize ; Point to the next slot | |
3a60a9f5 | 3508 | cmplwi cr7,r3,0 ; Are we at the end of the table? |
91447636 A |
3509 | cmplw r2,r5 ; See if we are in this table |
3510 | cmplw cr1,r2,r0 ; Check end also | |
3511 | sub r4,r2,r5 ; Calculate index to physical entry | |
3a60a9f5 | 3512 | beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry... |
91447636 A |
3513 | cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry |
3514 | slwi r4,r4,3 ; Get offset to physical entry | |
3515 | ||
3516 | blt-- mapFLPNitr ; Did not find it... | |
3517 | ||
3518 | add r3,r3,r4 ; Point right to the slot | |
3519 | b mapPhysLock ; Join common lock code | |
3520 | ||
3521 | mapFLPNmiss: | |
3522 | li r3,0 ; Show that we did not find it | |
3523 | blr ; Leave... | |
3524 | ||
3525 | ||
3526 | ; | |
55e303ae A |
3527 | ; mapPhysFindLock - find physent list and lock it |
3528 | ; R31 points to mapping | |
3529 | ; | |
3530 | .align 5 | |
3531 | ||
3532 | mapPhysFindLock: | |
3533 | lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table | |
3534 | lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part) | |
91447636 | 3535 | rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset |
55e303ae A |
3536 | addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry |
3537 | add r3,r3,r4 ; Point to table entry | |
3538 | lwz r5,mpPAddr(r31) ; Get physical page number | |
3539 | lwz r7,mrStart(r3) ; Get the start of range | |
3540 | lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank | |
3541 | sub r6,r5,r7 ; Get index to physent | |
3542 | rlwinm r6,r6,3,0,28 ; Get offset to physent | |
3543 | add r3,r3,r6 ; Point right to the physent | |
3544 | b mapPhysLock ; Join in the lock... | |
3545 | ||
3546 | ; | |
3547 | ; mapPhysLock - lock a physent list | |
3548 | ; R3 contains list header | |
3549 | ; | |
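;
; Bit 0 of the physent's ppLink word is the lock. The lwarx/stwcx. pair
; below is the classic reservation-based test-and-set; while the lock is
; held we spin on plain loads (after killing our reservation). A rough
; C11-style equivalent, for illustration only:
;
;     while (atomic_fetch_or(&pp->ppLink, 0x80000000U) & 0x80000000U)
;         while (atomic_load(&pp->ppLink) & 0x80000000U)
;             ;                          // spin without reserving
;     // the isync on exit keeps speculative loads behind the acquire
;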
3550 | .align 5 | |
3551 | ||
3552 | mapPhysLockS: | |
3553 | li r2,lgKillResv ; Get a spot to kill reservation | |
3554 | stwcx. r2,0,r2 ; Kill it... | |
3555 | ||
3556 | mapPhysLockT: | |
3557 | lwz r2,ppLink(r3) ; Get physent chain header | |
3558 | rlwinm. r2,r2,0,0,0 ; Is lock clear? | |
3559 | bne-- mapPhysLockT ; Nope, still locked... | |
3560 | ||
3561 | mapPhysLock: | |
3562 | lwarx r2,0,r3 ; Get the lock | |
3563 | rlwinm. r0,r2,0,0,0 ; Is it locked? | |
3564 | oris r0,r2,0x8000 ; Set the lock bit | |
3565 | bne-- mapPhysLockS ; It is locked, spin on it... | |
3566 | stwcx. r0,0,r3 ; Try to stuff it back... | |
3567 | bne-- mapPhysLock ; Collision, try again... | |
3568 | isync ; Clear any speculations | |
3569 | blr ; Leave... | |
3570 | ||
3571 | ||
3572 | ; | |
3573 | ; mapPhysUnlock - unlock a physent list | |
3574 | ; R3 contains list header | |
3575 | ; | |
3576 | .align 5 | |
3577 | ||
3578 | mapPhysUnlock: | |
3579 | lwz r0,ppLink(r3) ; Get physent chain header | |
3580 | rlwinm r0,r0,0,1,31 ; Clear the lock bit | |
3581 | eieio ; Make sure unlock comes last | |
3582 | stw r0,ppLink(r3) ; Unlock the list | |
3583 | blr | |
3584 | ||
3585 | ; | |
3586 | ; mapPhysMerge - merge the RC bits into the master copy | |
3587 | ; R3 points to the physent | |
3588 | ; R4 contains the RC bits | |
3589 | ; | |
3590 | ; Note: we just return if RC is 0 | |
3591 | ; | |
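;
; Illustratively (C11-style sketch; the shift from PTE bit positions to
; physent bit positions is folded into the rlwinm. below, and ppLink_low
; is a hypothetical name for the low word of ppLink):
;
;     if (rc != 0)
;         atomic_fetch_or(&pp->ppLink_low, rc);   // the lwarx/stwcx. retry loop
;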
3592 | .align 5 | |
3593 | ||
3594 | mapPhysMerge: | |
3595 | rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits | |
3596 | la r5,ppLink+4(r3) ; Point to the RC field | |
3597 | beqlr-- ; Leave if RC is 0... | |
3598 | ||
3599 | mapPhysMergeT: | |
3600 | lwarx r6,0,r5 ; Get the RC part | |
3601 | or r6,r6,r4 ; Merge in the RC | |
3602 | stwcx. r6,0,r5 ; Try to stuff it back... | |
3603 | bne-- mapPhysMergeT ; Collision, try again... | |
3604 | blr ; Leave... | |
3605 | ||
3606 | ; | |
3607 | ; Sets the physent link pointer and preserves all flags | |
3608 | ; The list is locked | |
3609 | ; R3 points to physent | |
3610 | ; R4 has link to set | |
3611 | ; | |
3612 | ||
3613 | .align 5 | |
3614 | ||
3615 | mapPhyCSet32: | |
3616 | la r5,ppLink+4(r3) ; Point to the link word | |
3617 | ||
3618 | mapPhyCSetR: | |
3619 | lwarx r2,0,r5 ; Get the link and flags | |
91447636 | 3620 | rlwimi r4,r2,0,ppFlags ; Insert the flags |
55e303ae A |
3621 | stwcx. r4,0,r5 ; Stick them back |
3622 | bne-- mapPhyCSetR ; Someone else did something, try again... | |
3623 | blr ; Return... | |
3624 | ||
3625 | .align 5 | |
3626 | ||
3627 | mapPhyCSet64: | |
91447636 A |
3628 | li r0,ppLFAmask ; Get mask to clean up mapping pointer |
3629 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
55e303ae A |
3630 | |
3631 | mapPhyCSet64x: | |
3632 | ldarx r2,0,r3 ; Get the link and flags | |
3633 | and r5,r2,r0 ; Isolate the flags | |
3634 | or r6,r4,r5 ; Add them to the link | |
3635 | stdcx. r6,0,r3 ; Stick them back | |
3636 | bne-- mapPhyCSet64x ; Someone else did something, try again... | |
3637 | blr ; Return... | |
3638 | ||
3639 | ; | |
3640 | ; mapBumpBusy - increment the busy count on a mapping | |
3641 | ; R3 points to mapping | |
3642 | ; | |
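;
; The busy count is the high byte of the word at mpFlags; the addis of
; 0x0100 below adds 0x01000000, bumping that byte without disturbing the
; flag bits beneath it (mapDropBusy adds 0xFF000000, i.e. subtracts one).
; Roughly, with a hypothetical compare-and-swap helper:
;
;     do {
;         old = mp->mpFlags;
;     } while (!compare_and_swap(&mp->mpFlags, old, old + 0x01000000));
;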
3643 | ||
3644 | .align 5 | |
3645 | ||
3646 | mapBumpBusy: | |
3647 | lwarx r4,0,r3 ; Get mpBusy | |
3648 | addis r4,r4,0x0100 ; Bump the busy count | |
3649 | stwcx. r4,0,r3 ; Save it back | |
3650 | bne-- mapBumpBusy ; This did not work, try again... | |
3651 | blr ; Leave... | |
3652 | ||
3653 | ; | |
3654 | ; mapDropBusy - decrement the busy count on a mapping | |
3655 | ; R3 points to mapping | |
3656 | ; | |
3657 | ||
3658 | .globl EXT(mapping_drop_busy) | |
3659 | .align 5 | |
3660 | ||
3661 | LEXT(mapping_drop_busy) | |
3662 | mapDropBusy: | |
3663 | lwarx r4,0,r3 ; Get mpBusy | |
3664 | addis r4,r4,0xFF00 ; Drop the busy count | |
3665 | stwcx. r4,0,r3 ; Save it back | |
3666 | bne-- mapDropBusy ; This did not work, try again... | |
3667 | blr ; Leave... | |
3668 | ||
3669 | ; | |
3670 | ; mapDrainBusy - drain the busy count on a mapping | |
3671 | ; R3 points to mapping | |
3672 | ; Note: we already have a busy for ourselves. Only one | |
3673 | ; busy per processor is allowed, so we just spin here | |
3674 | ; waiting for the count to drop to 1. | |
3675 | ; Also, the mapping cannot be on any lists when we do this, | |
3676 | ; so all we are doing is waiting until it can be released. | |
3677 | ; | |
3678 | ||
3679 | .align 5 | |
3680 | ||
3681 | mapDrainBusy: | |
3682 | lwz r4,mpFlags(r3) ; Get mpBusy | |
3683 | rlwinm r4,r4,8,24,31 ; Clean it up | |
3684 | cmplwi r4,1 ; Is it just our busy? | |
3685 | beqlr++ ; Yeah, it is clear... | |
3686 | b mapDrainBusy ; Try again... | |
3687 | ||
3688 | ||
3689 | ||
3690 | ; | |
3691 | ; handleDSeg - handle a data segment fault | |
3692 | ; handleISeg - handle an instruction segment fault | |
3693 | ; | |
3694 | ; All that we do here is to map these to DSI or ISI and ensure | |
3695 | ; that the hash bit is not set. This forces the fault code | |
3696 | ; to also handle the missing segment. | |
3697 | ; | |
3698 | ; At entry R2 contains per_proc, R13 contains savearea pointer, | |
3699 | ; and R11 is the exception code. | |
3700 | ; | |
3701 | ||
3702 | .align 5 | |
3703 | .globl EXT(handleDSeg) | |
3704 | ||
3705 | LEXT(handleDSeg) | |
3706 | ||
3707 | li r11,T_DATA_ACCESS ; Change fault to DSI | |
3708 | stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss | |
3709 | b EXT(handlePF) ; Join common... | |
3710 | ||
3711 | .align 5 | |
3712 | .globl EXT(handleISeg) | |
3713 | ||
3714 | LEXT(handleISeg) | |
3715 | ||
3716 | li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI | |
3717 | stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss | |
3718 | b EXT(handlePF) ; Join common... | |
3719 | ||
3720 | ||
3721 | /* | |
3722 | * handlePF - handle a page fault interruption | |
3723 | * | |
3724 | * At entry R2 contains per_proc, R13 contains savearea pointer, | |
3725 | * and R11 is the exception code. | |
3726 | * | |
3727 | * This first part does a quick check to see if we can handle the fault. | |
3728 | * We cannot handle any kind of protection exceptions here, so we pass | |
3729 | * them up to the next level. | |
3730 | * | |
3731 | * NOTE: In order for a page-fault redrive to work, the translation miss | |
3732 | * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur | |
3733 | * before we come here. | |
3734 | */ | |
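;
; The screening and pmap selection that follow, illustratively in C:
;
;     if (dsisr & (dsiNoEx | dsiProt | dsiInvMode | dsiAC))
;         return_to_higher_level();      // protection-class faults are not handled here
;
;     // MSR[PR] means user mode; MSR[RI] means the kernel was touching
;     // user space, so the user pmap applies in both cases
;     pmap = (msr & (MASK(MSR_PR) | MASK(MSR_RI))) ? per_proc->ppUserPmap
;                                                  : kernel_pmap_phys;
;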
3735 | ||
3736 | .align 5 | |
3737 | .globl EXT(handlePF) | |
3738 | ||
3739 | LEXT(handlePF) | |
3740 | ||
3741 | mfsprg r12,2 ; Get feature flags | |
3742 | cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction | |
3743 | lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode | |
3744 | mtcrf 0x02,r12 ; move pf64Bit to cr6 | |
3745 | lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here | |
3746 | lwz r18,SAVflags(r13) ; Get the flags | |
3747 | ||
3748 | beq-- gotIfetch ; We have an IFETCH here... | |
3749 | ||
3750 | lwz r27,savedsisr(r13) ; Get the DSISR | |
3751 | lwz r29,savedar(r13) ; Get the first half of the DAR | |
3752 | lwz r30,savedar+4(r13) ; And second half | |
3753 | ||
3754 | b ckIfProt ; Go check if this is a protection fault... | |
3755 | ||
3756 | gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value | |
3757 | lwz r29,savesrr0(r13) ; Get the first half of the instruction address | |
3758 | lwz r30,savesrr0+4(r13) ; And second half | |
3759 | stw r27,savedsisr(r13) ; Save the "constructed" DSISR | |
3760 | ||
3761 | ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception? | |
3762 | li r20,64 ; Set a limit of 64 nests for sanity check | |
3763 | bne-- hpfExit ; Yes... (probably not though) | |
91447636 | 3764 | |
55e303ae A |
3765 | ; |
3766 | ; Note: if the RI is on, we are accessing user space from the kernel, therefore we | |
3767 | ; should be loading the user pmap here. | |
3768 | ; | |
3769 | ||
3770 | andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space? | |
3771 | lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel | |
3772 | mr r19,r2 ; Remember the per_proc | |
3773 | ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address) | |
3774 | mr r23,r30 ; Save the low part of faulting address | |
3775 | beq-- hpfInKern ; Skip if we are in the kernel | |
3776 | la r8,ppUserPmap(r19) ; Point to the current user pmap | |
3777 | ||
3778 | hpfInKern: mr r22,r29 ; Save the high part of faulting address | |
3779 | ||
3780 | bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit... | |
3781 | ||
3782 | ; | |
3783 | ; On 32-bit machines we emulate a segment exception by loading unused SRs with a | |
3784 | ; predefined value that corresponds to no address space. When we see that value | |
3785 | ; we turn off the PTE miss bit in the DSISR to drive the code later on that will | |
3786 | ; cause the proper SR to be loaded. | |
3787 | ; | |
3788 | ||
3789 | lwz r28,4(r8) ; Pick up the pmap | |
3790 | rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive? | |
3791 | mr r25,r28 ; Save the original pmap (in case we nest) | |
91447636 A |
3792 | lwz r0,pmapFlags(r28) ; Get pmap's flags |
3793 | bne hpfGVtest ; Segs are not ours if so... | |
55e303ae A |
3794 | mfsrin r4,r30 ; Get the SR that was used for translation |
3795 | cmplwi r4,invalSpace ; Is this a simulated segment fault? | |
91447636 | 3796 | bne++ hpfGVtest ; No... |
55e303ae A |
3797 | |
3798 | rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR | |
91447636 | 3799 | b hpfGVtest ; Join on up... |
55e303ae A |
3800 | |
3801 | .align 5 | |
3802 | ||
3803 | nop ; Push hpfNest to a 32-byte boundary | |
3804 | nop ; Push hpfNest to a 32-byte boundary | |
3805 | nop ; Push hpfNest to a 32-byte boundary | |
55e303ae A |
3806 | |
3807 | hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit) | |
3808 | mr r25,r28 ; Save the original pmap (in case we nest) | |
91447636 A |
3809 | lwz r0,pmapFlags(r28) ; Get pmap's flags |
3810 | ||
3811 | hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist? | |
3812 | bne hpfGVxlate ; Yup, do accelerated shadow stuff | |
55e303ae A |
3813 | |
3814 | ; | |
3815 | ; This is where we loop descending nested pmaps | |
3816 | ; | |
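;
; The loop below, rendered as a hedged C sketch; the types and the
; map_search helper are illustrative stand-ins, and the pmap search-lock
; handling is omitted:
;
;     #include <stdint.h>
;     #include <stddef.h>
;
;     typedef struct pmap pmap_t;            // opaque here
;     typedef struct mapping {
;         int     nested, removing;          // stands in for mpNest/mpLinkage, mpRIP
;         int64_t reloc;                     // stands in for mpNestReloc
;         pmap_t *target;                    // pmap named by mpSpace
;     } mapping_t;
;
;     extern mapping_t *map_search(pmap_t *p, uint64_t va);  // assumed helper
;
;     static mapping_t *descend(pmap_t *pmap, uint64_t va)
;     {
;         for (int depth = 64; depth-- > 0; ) {    // 64-level sanity limit
;             mapping_t *m = map_search(pmap, va);
;             if (m == NULL || m->removing)
;                 return NULL;                     // not found (or going away)
;             if (!m->nested)
;                 return m;                        // found the real mapping
;             va  += (uint64_t)m->reloc;           // relocate the vaddr
;             pmap = m->target;                    // descend into nested pmap
;         }
;         return NULL;                             // nesting loop: give up
;     }
;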
3817 | ||
3818 | hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3819 | addi r20,r20,-1 ; Count nest try | |
3820 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
3821 | mr. r3,r3 ; Did we get the lock? | |
3822 | bne-- hpfBadLock ; Nope... | |
3823 | ||
3824 | mr r3,r28 ; Get the pmap pointer | |
3825 | mr r4,r22 ; Get top of faulting vaddr | |
3826 | mr r5,r23 ; Get bottom of faulting vaddr | |
3827 | bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags) | |
3828 | ||
3829 | rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one? | |
3830 | mr. r31,r3 ; Save the mapping if we found it | |
3831 | cmplwi cr1,r0,0 ; Check for removal | |
3832 | crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing | |
3833 | ||
3834 | bt-- cr0_eq,hpfNotFound ; Not found or removing... | |
91447636 A |
3835 | |
3836 | rlwinm r0,r7,0,mpType ; Isolate mapping type | |
3837 | cmplwi r0,mpNest ; Are we again nested? | |
3838 | cmplwi cr1,r0,mpLinkage ; Are we a linkage type? | |
3839 | cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type? | |
55e303ae A |
3840 | mr r26,r7 ; Get the flags for this mapping (passed back from search call) |
3841 | ||
3842 | lhz r21,mpSpace(r31) ; Get the space | |
3843 | ||
91447636 | 3844 | bne++ hpfFoundIt ; No, we found our guy... |
55e303ae A |
3845 | |
3846 | ||
3847 | #if pmapTransSize != 12 | |
3848 | #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize | |
3849 | #endif | |
91447636 | 3850 | cmplwi r0,mpLinkage ; Linkage mapping? |
55e303ae | 3851 | cmplwi cr1,r20,0 ; Too many nestings? |
91447636 | 3852 | beq-- hpfSpclNest ; Do we need to do special handling? |
55e303ae A |
3853 | |
3854 | hpfCSrch: lhz r21,mpSpace(r31) ; Get the space | |
3855 | lwz r8,mpNestReloc(r31) ; Get the vaddr relocation | |
3856 | lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half | |
3857 | la r3,pmapSXlk(r28) ; Point to the old pmap search lock | |
3858 | lis r0,0x8000 ; Get 0xFFFFFFFF80000000 | |
3859 | lis r10,hi16(EXT(pmapTrans)) ; Get the translate table | |
3860 | add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit | |
3861 | blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop... | |
3862 | or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit | |
3863 | slwi r11,r21,3 ; Multiply space by 8 | |
3864 | ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part | |
3865 | addc r23,r23,r9 ; Relocate bottom half of vaddr | |
3866 | lwz r10,0(r10) ; Get the actual translation map | |
3867 | slwi r12,r21,2 ; Multiply space by 4 | |
3868 | add r10,r10,r11 ; Add in the higher part of the index | |
3869 | rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit) | |
3870 | adde r22,r22,r8 ; Relocate the top half of the vaddr | |
3871 | add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry | |
3872 | bl sxlkUnlock ; Unlock the search list | |
3873 | ||
91447636 | 3874 | bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines |
55e303ae | 3875 | lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap |
91447636 A |
3876 | cmplwi r28,0 ; Is the pmap paddr valid? |
3877 | bne+ hpfNest ; Nest into new pmap... | |
3878 | b hpfBadPmap ; Handle bad pmap | |
55e303ae | 3879 | |
91447636 | 3880 | hpfGetPmap64: |
55e303ae | 3881 | ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap |
91447636 A |
3882 | cmpldi r28,0 ; Is the pmap paddr valid? |
3883 | bne++ hpfNest ; Nest into new pmap... | |
3884 | b hpfBadPmap ; Handle bad pmap | |
3885 | ||
55e303ae A |
3886 | |
3887 | ; | |
3888 | ; Error condition. We only allow 64 nestings. This keeps us from having to | |
3889 | ; check for recursive nests when we install them. | |
3890 | ; | |
3891 | ||
3892 | .align 5 | |
3893 | ||
3894 | hpfNestTooMuch: | |
3895 | lwz r20,savedsisr(r13) ; Get the DSISR | |
3896 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3897 | bl sxlkUnlock ; Unlock the search list (R3 good from above) | |
3898 | ori r20,r20,1 ; Indicate that there was a nesting problem | |
3899 | stw r20,savedsisr(r13) ; Stash it | |
3900 | lwz r11,saveexception(r13) ; Restore the exception code | |
3901 | b EXT(PFSExit) ; Exit through the fault path... | |
3902 | ||
3903 | ; | |
3904 | ; Error condition - lock failed - this is fatal | |
3905 | ; | |
3906 | ||
3907 | .align 5 | |
3908 | ||
3909 | hpfBadLock: | |
3910 | lis r0,hi16(Choke) ; System abend | |
3911 | ori r0,r0,lo16(Choke) ; System abend | |
3912 | li r3,failMapping ; Show mapping failure | |
3913 | sc | |
91447636 A |
3914 | |
3915 | ; | |
3916 | ; Error condition - space id selected an invalid pmap - fatal | |
3917 | ; | |
3918 | ||
3919 | .align 5 | |
3920 | ||
3921 | hpfBadPmap: | |
3922 | lis r0,hi16(Choke) ; System abend | |
3923 | ori r0,r0,lo16(Choke) ; System abend | |
3924 | li r3,failPmap ; Show invalid pmap | |
3925 | sc | |
3926 | ||
55e303ae A |
3927 | ; |
3928 | ; Did not find any kind of mapping | |
3929 | ; | |
3930 | ||
3931 | .align 5 | |
3932 | ||
3933 | hpfNotFound: | |
3934 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3935 | bl sxlkUnlock ; Unlock it | |
3936 | lwz r11,saveexception(r13) ; Restore the exception code | |
3937 | ||
3938 | hpfExit: ; We need this because we cannot do a relative branch | |
3939 | b EXT(PFSExit) ; Exit through the fault path... | |
3940 | ||
3941 | ||
3942 | ; | |
3943 | ; Here is where we handle special mappings. So far, the only use is to load a | |
3944 | ; processor specific segment register for copy in/out handling. | |
3945 | ; | |
3946 | ; The only (so far implemented) special map is used for copyin/copyout. | |
3947 | ; We keep a mapping of a "linkage" mapping in the per_proc. | |
3948 | ; The linkage mapping is basically a nested pmap that is switched in | |
3949 | ; as part of context switch. It relocates the appropriate user address | |
3950 | ; space slice into the right place in the kernel. | |
3951 | ; | |
3952 | ||
3953 | .align 5 | |
3954 | ||
3955 | hpfSpclNest: | |
91447636 A |
3956 | la r31,ppUMWmp(r19) ; Just point to the mapping |
3957 | oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here | |
55e303ae A |
3958 | b hpfCSrch ; Go continue search... |
3959 | ||
3960 | ||
3961 | ; | |
3962 | ; We have now found a mapping for the address we faulted on. | |
3963 | ; | |
3964 | ||
3965 | ; | |
3966 | ; Here we go about calculating what the VSID should be. We concatenate | |
3967 | ; the space ID (14 bits wide) 3 times. We then slide the vaddr over | |
3968 | ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID). | |
3969 | ; Then we XOR the expanded space ID and the shifted vaddr. This gives us | |
3970 | ; the VSID. | |
3971 | ; | |
3972 | ; This is used both for segment handling and PTE handling | |
3973 | ; | |
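;
; A rough C model of that derivation (illustrative only; the real code
; works on a split 32-bit register pair and uses IBM bit numbering):
;
;     #include <stdint.h>
;
;     static uint64_t make_vsid(uint64_t ea, uint32_t space)
;     {
;         uint64_t sid  = space & 0x3FFFu;                  // 14-bit space ID
;         uint64_t rep  = (sid << 28) | (sid << 14) | sid;  // 3 copies, 42 bits
;         uint64_t esid = ea >> 28;                         // EA bits 0:35
;         return (esid ^ rep) & ((1ULL << 42) - 1);         // the VSID
;     }
;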
3974 | ||
3975 | ||
3976 | #if maxAdrSpb != 14 | |
3977 | #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!! | |
3978 | #endif | |
3979 | ||
91447636 A |
3980 | ; Important non-volatile registers at this point ('home' means the final pmap/mapping found |
3981 | ; when a multi-level mapping has been successfully searched): | |
3982 | ; r21: home space id number | |
3983 | ; r22: relocated high-order 32 bits of vaddr | |
3984 | ; r23: relocated low-order 32 bits of vaddr | |
3985 | ; r25: pmap physical address | |
3986 | ; r27: dsisr | |
3987 | ; r28: home pmap physical address | |
3988 | ; r29: high-order 32 bits of faulting vaddr | |
3989 | ; r30: low-order 32 bits of faulting vaddr | |
3990 | ; r31: mapping's physical address | |
3991 | ||
55e303ae A |
3992 | .align 5 |
3993 | ||
3994 | hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment | |
91447636 | 3995 | hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment? |
55e303ae A |
3996 | rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID |
3997 | rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order | |
3998 | rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over | |
91447636 | 3999 | rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag |
55e303ae A |
4000 | rlwimi r21,r21,14,4,17 ; Make a second copy of space above first |
4001 | cmplwi cr5,r0,0 ; Did we just do a special nesting? | |
4002 | rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35 | |
4003 | crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest | |
4004 | rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register | |
4005 | xor r14,r14,r20 ; Calculate the top half of VSID | |
4006 | xor r15,r15,r21 ; Calculate the bottom half of the VSID | |
4007 | rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing) | |
4008 | rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry | |
4009 | rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top | |
4010 | rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position | |
4011 | or r12,r12,r15 ; Add key into the bottom of VSID | |
4012 | ; | |
4013 | ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12 | |
4014 | ||
4015 | bne++ hpfPteMiss ; Nope, normal PTE miss... | |
4016 | ||
4017 | ; | |
4018 | ; Here is the only place that we make an entry in the pmap segment cache. | |
4019 | ; | |
4020 | ; Note that we do not make an entry in the segment cache for special | |
4021 | ; nested mappings. This makes the copy in/out segment get refreshed | |
4022 | ; when switching threads. | |
4023 | ; | |
4024 | ; The first thing that we do is to look up the ESID we are going to load | |
4025 | ; into a segment in the pmap cache. If it is already there, this is | |
4026 | ; a segment that appeared since the last time we switched address spaces. | |
4027 | ; If all is correct, then it was another processor that made the cache | |
4028 | ; entry. If not, well, it is an error that we should die on, but I have | |
4029 | ; not figured out a good way to trap it yet. | |
4030 | ; | |
4031 | ; If we get a hit, we just bail, otherwise, lock the pmap cache, select | |
4032 | ; an entry based on the generation number, update the cache entry, and | |
4033 | ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit | |
4034 | ; entries that correspond to the last 4 bits (32:35 for 64-bit and | |
4035 | ; 0:3 for 32-bit) of the ESID. | |
4036 | ; | |
4037 | ; Then we unlock and bail. | |
4038 | ; | |
4039 | ; First lock it. Then select a free slot or steal one based on the generation | |
4040 | ; number. Then store it, update the allocation flags, and unlock. | |
4041 | ; | |
4042 | ; The cache entry contains an image of the ESID/VSID pair we would load for | |
4043 | ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image. | |
4044 | ; | |
4045 | ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not | |
4046 | ; the current one, which may have changed because we nested. | |
4047 | ; | |
4048 | ; Also remember that we do not store the valid bit in the ESID. If we | |
4049 | ; do, this will break some other stuff. | |
4050 | ; | |
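;
; Hedged C sketch of the slot-selection part of that update (the constant
; and the control-word layout are assumptions patterned on the comments,
; not the real pmap definitions):
;
;     #include <stdint.h>
;     #define SEG_CACHE_SLOTS 16               // stands in for pmapSegCacheUse
;
;     // ctl: a 1 bit in the top SEG_CACHE_SLOTS bits marks a free slot;
;     // the low-order bits hold the generation number.
;     static int segcache_select(uint32_t *ctl)
;     {
;         uint32_t gen  = *ctl & 0xFFFFu;               // current generation
;         int      free = __builtin_clz(*ctl | 1);      // first free slot, if any
;         int      slot = (free < SEG_CACHE_SLOTS)
;                       ? free                          // take the free one
;                       : (int)(gen % SEG_CACHE_SLOTS); // else steal by generation
;         *ctl  = (*ctl & ~0xFFFFu) | ((gen + 1) & 0xFFFFu); // bump generation
;         *ctl &= ~(0x80000000u >> slot);               // mark slot in use
;         return slot;                       // caller fills in ESID/VSID pair
;     }
;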
4051 | ||
4052 | bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault.... | |
4053 | ||
4054 | mr r3,r25 ; Point to the pmap | |
37839358 A |
4055 | mr r4,r29 ; ESID high half |
4056 | mr r5,r30 ; ESID low half | |
55e303ae A |
4057 | bl pmapCacheLookup ; Go see if this is in the cache already |
4058 | ||
4059 | mr. r3,r3 ; Did we find it? | |
4060 | mr r4,r11 ; Copy this to a different register | |
4061 | ||
4062 | bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry... | |
4063 | ||
4064 | lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table | |
4065 | lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table | |
4066 | ||
4067 | cntlzw r7,r4 ; Find a free slot | |
4068 | ||
4069 | subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one | |
4070 | rlwinm r30,r30,0,0,3 ; Clean up the ESID | |
4071 | srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not | |
4072 | addi r5,r4,1 ; Bump the generation number | |
4073 | and r7,r7,r6 ; Clear bit number if none empty | |
4074 | andc r8,r4,r6 ; Clear generation count if we found an empty | |
4075 | rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word | |
4076 | or r7,r7,r8 ; Select a slot number | |
4077 | li r8,0 ; Clear | |
4078 | andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using | |
4079 | oris r8,r8,0x8000 ; Get the high bit on | |
4080 | la r9,pmapSegCache(r25) ; Point to the segment cache | |
4081 | slwi r6,r7,4 ; Get index into the segment cache | |
4082 | slwi r2,r7,2 ; Get index into the segment cache sub-tag index | |
4083 | srw r8,r8,r7 ; Get the mask | |
4084 | cmplwi r2,32 ; See if we are in the first or second half of sub-tag | |
4085 | li r0,0 ; Clear | |
4086 | rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out | |
4087 | oris r0,r0,0xF000 ; Get the sub-tag mask | |
4088 | add r9,r9,r6 ; Point to the cache slot | |
4089 | srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half) | |
4090 | srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half) | |
4091 | ||
4092 | stw r29,sgcESID(r9) ; Save the top of the ESID | |
4093 | andc r10,r10,r0 ; Clear sub-tag slot in case we are in top | |
4094 | andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom | |
4095 | stw r30,sgcESID+4(r9) ; Save the bottom of the ESID | |
4096 | or r10,r10,r5 ; Stick in subtag in case top half | |
4097 | or r11,r11,r5 ; Stick in subtag in case bottom half | |
4098 | stw r14,sgcVSID(r9) ; Save the top of the VSID | |
4099 | andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated | |
4100 | stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key | |
4101 | bge hpfSCSTbottom ; Go save the bottom part of sub-tag | |
4102 | ||
4103 | stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag | |
4104 | b hpfNoCacheEnt ; Go finish up... | |
4105 | ||
4106 | hpfSCSTbottom: | |
4107 | stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag | |
4108 | ||
4109 | ||
4110 | hpfNoCacheEnt: | |
4111 | eieio ; Make sure cache is updated before lock | |
4112 | stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number | |
4113 | ||
4114 | ||
4115 | hpfNoCacheEnt2: | |
4116 | lwz r4,ppMapFlags(r19) ; Get the protection key modifier | |
4117 | bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment... | |
4118 | ||
4119 | ; | |
4120 | ; Make and enter 32-bit segment register | |
4121 | ; | |
4122 | ||
4123 | lwz r16,validSegs(r19) ; Get the valid SR flags | |
4124 | xor r12,r12,r4 ; Alter the storage key before loading segment register | |
4125 | rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting | |
4126 | rlwinm r6,r12,19,1,3 ; Insert the keys and N bit | |
4127 | lis r0,0x8000 ; Set bit 0 | |
4128 | rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID | |
4129 | srw r0,r0,r2 ; Get bit corresponding to SR | |
4130 | rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents | |
4131 | or r16,r16,r0 ; Show that SR is valid | |
4132 | ||
4133 | mtsrin r6,r30 ; Set the actual SR | |
4134 | ||
4135 | stw r16,validSegs(r19) ; Set the valid SR flags | |
4136 | ||
4137 | b hpfPteMiss ; SR loaded, go do a PTE... | |
4138 | ||
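;
; The segment register image built above follows the architected 32-bit
; SR layout (T = 0 for ordinary translation); for reference, a small C
; sketch of the same packing:
;
;     #include <stdint.h>
;
;     static uint32_t sr_image(uint32_t vsid24, unsigned ks, unsigned kp,
;                              unsigned noexec)
;     {
;         return ((ks & 1u) << 30) |         // Ks: supervisor-state key
;                ((kp & 1u) << 29) |         // Kp: user-state key
;                ((noexec & 1u) << 28) |     // N: no-execute
;                (vsid24 & 0x00FFFFFFu);     // 24-bit VSID
;     }
;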
4139 | ; | |
4140 | ; Make and enter 64-bit segment look-aside buffer entry. | |
4141 | ; Note that the cache entry is the right format except for valid bit. | |
4142 | ; We also need to convert from long long to 64-bit register values. | |
4143 | ; | |
4144 | ||
4145 | ||
4146 | .align 5 | |
4147 | ||
4148 | hpfLoadSeg64: | |
4149 | ld r16,validSegs(r19) ; Get the valid SLB entry flags | |
4150 | sldi r8,r29,32 ; Move high order address over | |
4151 | sldi r10,r14,32 ; Move high part of VSID over | |
4152 | ||
4153 | not r3,r16 ; Make valids be 0s | |
4154 | li r0,1 ; Prepare to set bit 0 | |
4155 | ||
4156 | cntlzd r17,r3 ; Find a free SLB | |
4157 | xor r12,r12,r4 ; Alter the storage key before loading segment table entry | |
4158 | or r9,r8,r30 ; Form full 64-bit address | |
4159 | cmplwi r17,63 ; Did we find a free SLB entry? | |
4160 | sldi r0,r0,63 ; Get bit 0 set | |
4161 | or r10,r10,r12 ; Move in low part and keys | |
4162 | addi r17,r17,1 ; Skip SLB 0 always | |
4163 | blt++ hpfFreeSeg ; Yes, go load it... | |
4164 | ||
4165 | ; | |
4166 | ; No free SLB entries, select one that is in use and invalidate it | |
4167 | ; | |
4168 | lwz r4,ppSegSteal(r19) ; Get the next slot to steal | |
4169 | addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only | |
4170 | addi r4,r4,1 ; Set next slot to steal | |
4171 | slbmfee r7,r17 ; Get the entry that is in the selected spot | |
4172 | subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap | |
4173 | rldicr r7,r7,0,35 ; Clear the valid bit and the rest | |
4174 | srawi r2,r2,31 ; Get -1 if steal index still in range | |
4175 | slbie r7 ; Invalidate the in-use SLB entry | |
4176 | and r4,r4,r2 ; Reset steal index when it should wrap | |
4177 | isync ; | |
4178 | ||
4179 | stw r4,ppSegSteal(r19) ; Set the next slot to steal | |
4180 | ; | |
4181 | ; We are now ready to stick the SLB entry in the SLB and mark it in use | |
4182 | ; | |
4183 | ||
4184 | hpfFreeSeg: | |
4185 | subi r4,r17,1 ; Adjust shift to account for skipping slb 0 | |
4186 | mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear | |
4187 | srd r0,r0,r4 ; Set bit mask for allocation | |
4188 | oris r9,r9,0x0800 ; Turn on the valid bit | |
4189 | or r16,r16,r0 ; Turn on the allocation flag | |
4190 | rldimi r9,r17,0,58 ; Copy in the SLB entry selector | |
4191 | ||
4192 | beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest... | |
4193 | slbie r7 ; Blow away a potential duplicate | |
4194 | ||
4195 | hpfNoBlow: slbmte r10,r9 ; Make that SLB entry | |
4196 | ||
4197 | std r16,validSegs(r19) ; Mark as valid | |
4198 | b hpfPteMiss ; STE loaded, go do a PTE... | |
4199 | ||
4200 | ; | |
4201 | ; The segment has been set up and loaded if need be. Now we are ready to build the | |
4202 | ; PTE and get it into the hash table. | |
4203 | ; | |
4204 | ; Note that there is actually a race here. If we start fault processing on | |
4205 | ; a different pmap, i.e., we have descended into a nested pmap, it is possible | |
4206 | ; that the nest could have been removed from the original pmap. We would | |
4207 | ; succeed with this translation anyway. I do not think we need to worry | |
4208 | ; about this (famous last words) because nobody should be unnesting anything | |
4209 | ; if there are still people activily using them. It should be up to the | |
4210 | ; if there are still people actively using them. It should be up to the | |
4211 | ; | |
4212 | ; There is also another race here: if we fault on the same mapping on more than | |
4213 | ; one processor at the same time, we could end up with multiple PTEs for the same | |
4214 | ; mapping. This is not a good thing.... We really only need one of the | |
4215 | ; fault handlers to finish, so what we do is to set a "fault in progress" flag in | |
4216 | ; the mapping. If we see that set, we just abandon the handler and hope that by | |
4217 | ; the time we restore context and restart the interrupted code, the fault has | |
4218 | ; been resolved by the other guy. If not, we will take another fault. | |
4219 | ; | |
4220 | ||
4221 | ; | |
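;
; The mpFIP handshake below, as a hedged C sketch (the flag value is a
; placeholder; the real code folds the mpHValid test into the same
; reservation window):
;
;     #include <stdatomic.h>
;     #include <stdbool.h>
;     #include <stdint.h>
;
;     #define MP_FIP 0x00000080u             // fault-in-progress, assumed bit
;
;     // Returns true if we now own the fault; false means another CPU is
;     // already on it (or a PTE is already in place) and we should abandon.
;     static bool claim_fault(_Atomic uint32_t *flags, bool pte_valid)
;     {
;         uint32_t old = atomic_load(flags);
;         do {
;             if ((old & MP_FIP) || pte_valid)
;                 return false;              // somebody beat us to it
;         } while (!atomic_compare_exchange_weak(flags, &old, old | MP_FIP));
;         return true;                       // flag set; finish building the PTE
;     }
;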
4222 | ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not. | |
4223 | ; It is required to stay there until after we call mapSelSlot!!!! | |
4224 | ; | |
4225 | ||
4226 | .align 5 | |
4227 | ||
4228 | hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field | |
4229 | lwz r12,mpPte(r31) ; Get the quick pointer to PTE | |
4230 | li r3,mpHValid ; Get the PTE valid bit | |
4231 | andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side? | |
4232 | ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag | |
4233 | crnot cr1_eq,cr0_eq ; Remember if FIP was on | |
4234 | and. r12,r12,r3 ; Isolate the valid bit | |
4235 | crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail... | |
4236 | beq-- hpfAbandon ; Yes, other processor is or already has handled this... | |
91447636 A |
4237 | rlwinm r0,r2,0,mpType ; Isolate mapping type |
4238 | cmplwi r0,mpBlock ; Is this a block mapping? | |
4239 | crnot cr7_eq,cr0_eq ; Remember if we have a block mapping | |
55e303ae A |
4240 | stwcx. r2,0,r31 ; Store the flags |
4241 | bne-- hpfPteMiss ; Collision, try again... | |
4242 | ||
4243 | bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff... | |
4244 | ||
4245 | ; | |
4246 | ; At this point we are about to do the 32-bit PTE generation. | |
4247 | ; | |
4248 | ; The following is the R14:R15 pair that contains the "shifted" VSID: | |
4249 | ; | |
4250 | ; 1 2 3 4 4 5 6 | |
4251 | ; 0 8 6 4 2 0 8 6 3 | |
4252 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4253 | ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| | |
4254 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4255 | ; | |
4256 | ; The 24 bits of the 32-bit architecture VSID is in the following: | |
4257 | ; | |
4258 | ; 1 2 3 4 4 5 6 | |
4259 | ; 0 8 6 4 2 0 8 6 3 | |
4260 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4261 | ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| | |
4262 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4263 | ; | |
4264 | ||
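;
; The PTEG address computed below is the classic 32-bit primary hash; a
; simplified C rendering (4K pages, SDR1 parsing reduced to its two fields):
;
;     #include <stdint.h>
;
;     static uint32_t pteg_addr(uint32_t sdr1, uint32_t vsid, uint32_t ea)
;     {
;         uint32_t base = sdr1 & 0xFFFF0000u;     // HTABORG: hash table base
;         uint32_t size = sdr1 & 0x000001FFu;     // HTABMASK: size mask
;         uint32_t pi   = (ea >> 12) & 0xFFFFu;   // page index within segment
;         uint32_t hash = (vsid & 0x7FFFFu) ^ pi; // primary hash function
;         uint32_t mask = (size << 16) | 0xFFC0u; // wrap hash into the table
;         return base + ((hash << 6) & mask);     // 64-byte PTEG address
;     }
;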
4265 | ||
4266 | hpfBldPTE32: | |
4267 | lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion) | |
4268 | lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping | |
4269 | ||
4270 | mfsdr1 r27 ; Get the hash table base address | |
4271 | ||
4272 | rlwinm r0,r23,0,4,19 ; Isolate just the page index | |
4273 | rlwinm r18,r23,10,26,31 ; Extract the API | |
4274 | xor r19,r15,r0 ; Calculate hash << 12 | |
4275 | mr r2,r25 ; Save the flag part of the mapping | |
4276 | rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image | |
4277 | rlwinm r16,r27,16,7,15 ; Extract the hash table size | |
4278 | rlwinm r25,r25,0,0,19 ; Clear out the flags | |
4279 | slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported) | |
4280 | sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map) | |
4281 | ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask | |
4282 | rlwinm r27,r27,0,0,15 ; Extract the hash table base | |
4283 | rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table | |
4284 | add r24,r24,r25 ; Adjust to true physical address | |
4285 | rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image | |
4286 | rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot | |
4287 | and r19,r19,r16 ; Wrap hash table offset into the hash table | |
4288 | ori r24,r24,lo16(mpR) ; Turn on the reference bit right now | |
4289 | rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA | |
4290 | add r19,r19,r27 ; Point to the PTEG | |
4291 | subfic r20,r20,-4 ; Get negative offset to PCA | |
4292 | oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on | |
4293 | add r20,r20,r27 ; Point to the PCA slot | |
4294 | ||
4295 | ; | |
4296 | ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower. | |
4297 | ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA. | |
4298 | ; | |
4299 | ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible | |
4300 | ; that some other processor beat us and stuck in a PTE or that | |
4301 | ; all we had was a simple segment exception and the PTE was there the whole time. | |
4302 | ; If we find one (a valid pointer), we are done. | |
4303 | ; | |
4304 | ||
4305 | mr r7,r20 ; Copy the PCA pointer | |
4306 | bl mapLockPteg ; Lock the PTEG | |
4307 | ||
4308 | lwz r12,mpPte(r31) ; Get the offset to the PTE | |
4309 | mr r17,r6 ; Remember the PCA image | |
4310 | mr r16,r6 ; Prime the post-select PCA image | |
4311 | andi. r0,r12,mpHValid ; Is there a PTE here already? | |
4312 | li r21,8 ; Get the number of slots | |
4313 | ||
4314 | bne- cr7,hpfNoPte32 ; Skip this for a block mapping... | |
4315 | ||
4316 | bne- hpfBailOut ; Someone already did this for us... | |
4317 | ||
4318 | ; | |
91447636 | 4319 | ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a |
55e303ae A |
4320 | ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was |
4321 | ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. | |
4322 | ; R4 returns the slot index. | |
4323 | ; | |
4324 | ; REMEMBER: CR7 indicates that we are building a block mapping. | |
4325 | ; | |
4326 | ||
4327 | hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots | |
4328 | mr r6,r17 ; Get back the original PCA | |
4329 | rlwimi r6,r16,0,8,15 ; Insert the updated steal slot | |
4330 | blt- hpfBailOut ; Holy Cow, all slots are locked... | |
4331 | ||
4332 | bl mapSelSlot ; Go select a slot (note that the PCA image is already set up) | |
4333 | ||
a3d08fcd A |
4334 | cmplwi cr5,r3,1 ; Did we steal a slot? |
4335 | rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address | |
55e303ae A |
4336 | mr r16,r6 ; Remember the PCA image after selection |
4337 | blt+ cr5,hpfInser32 ; Nope, no steal... | |
4338 | ||
4339 | lwz r6,0(r19) ; Get the old PTE | |
4340 | lwz r7,4(r19) ; Get the real part of the stealee | |
4341 | rlwinm r6,r6,0,1,31 ; Clear the valid bit | |
4342 | bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping... | |
4343 | srwi r3,r7,12 ; Change phys address to a ppnum | |
4344 | bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page) | |
4345 | cmplwi cr1,r3,0 ; Check if this is in RAM | |
4346 | bne- hpfNoPte32 ; Could not get it, try for another... | |
4347 | ||
4348 | crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map | |
4349 | ||
4350 | hpfNipBM: stw r6,0(r19) ; Set the invalid PTE | |
4351 | ||
4352 | sync ; Make sure the invalid is stored | |
4353 | li r9,tlbieLock ; Get the TLBIE lock | |
4354 | rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part | |
4355 | ||
4356 | hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock | |
4357 | mfsprg r4,0 ; Get the per_proc | |
4358 | rlwinm r8,r6,25,18,31 ; Extract the space ID | |
4359 | rlwinm r11,r6,25,18,31 ; Extract the space ID | |
4360 | lwz r7,hwSteals(r4) ; Get the steal count | |
4361 | srwi r2,r6,7 ; Align segment number with hash | |
4362 | rlwimi r11,r11,14,4,17 ; Get copy above ourselves | |
4363 | mr. r0,r0 ; Is it locked? | |
4364 | srwi r0,r19,6 ; Align PTEG offset for back hash | |
4365 | xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits) | |
4366 | xor r11,r11,r0 ; Hash backwards to partial vaddr | |
4367 | rlwinm r12,r2,14,0,3 ; Shift segment up | |
4368 | mfsprg r2,2 ; Get feature flags | |
4369 | li r0,1 ; Get our lock word | |
4370 | rlwimi r12,r6,22,4,9 ; Move up the API | |
4371 | bne- hpfTLBIE32 ; It is locked, go wait... | |
4372 | rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr | |
4373 | ||
4374 | stwcx. r0,0,r9 ; Try to get it | |
4375 | bne- hpfTLBIE32 ; We was beat... | |
4376 | addi r7,r7,1 ; Bump the steal count | |
4377 | ||
4378 | rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box? | |
4379 | li r0,0 ; Lock clear value | |
4380 | ||
4381 | tlbie r12 ; Invalidate it everywhere | |
4382 | ||
91447636 | 4383 | |
55e303ae A |
4384 | beq- hpfNoTS32 ; Can not have MP on this machine... |
4385 | ||
4386 | eieio ; Make sure that the tlbie happens first | |
4387 | tlbsync ; Wait for everyone to catch up | |
4388 | sync ; Make sure of it all | |
91447636 A |
4389 | |
4390 | hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock | |
5eebf738 A |
4391 | |
4392 | stw r7,hwSteals(r4) ; Save the steal count | |
55e303ae A |
4393 | bgt cr5,hpfInser32 ; We just stole a block mapping... |
4394 | ||
4395 | lwz r4,4(r19) ; Get the RC of the just invalidated PTE | |
4396 | ||
4397 | la r11,ppLink+4(r3) ; Point to the master RC copy | |
4398 | lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping | |
4399 | rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC | |
4400 | ||
4401 | hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC | |
4402 | or r0,r0,r2 ; Merge in the new RC | |
4403 | stwcx. r0,0,r11 ; Try to stick it back | |
4404 | bne- hpfMrgRC32 ; Try again if we collided... | |
4405 | ||
4406 | ||
91447636 | 4407 | hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address |
55e303ae A |
4408 | beq- hpfLostPhys ; We could not find our mapping. Kick the bucket... |
4409 | ||
4410 | lhz r10,mpSpace(r7) ; Get the space | |
4411 | lwz r9,mpVAddr+4(r7) ; And the vaddr | |
4412 | cmplw cr1,r10,r8 ; Is this one of ours? | |
4413 | xor r9,r12,r9 ; Compare virtual address | |
4414 | cmplwi r9,0x1000 ; See if we really match | |
4415 | crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match | |
4416 | beq+ hpfFPnch2 ; Yes, found ours... | |
4417 | ||
4418 | lwz r7,mpAlias+4(r7) ; Chain on to the next | |
4419 | b hpfFPnch ; Check it out... | |
4420 | ||
4421 | hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG | |
4422 | stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG) | |
4423 | bl mapPhysUnlock ; Unlock the physent now | |
4424 | ||
4425 | hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on | |
4426 | ||
4427 | stw r24,4(r19) ; Stuff in the real part of the PTE | |
4428 | eieio ; Make sure this gets there first | |
4429 | ||
4430 | stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid | |
4431 | mr r17,r16 ; Get the PCA image to save | |
4432 | b hpfFinish ; Go join the common exit code... | |
4433 | ||
4434 | ||
4435 | ; | |
4436 | ; At this point we are about to do the 64-bit PTE generation. | |
4437 | ; | |
4438 | ; The following is the R14:R15 pair that contains the "shifted" VSID: | |
4439 | ; | |
4440 | ; 1 2 3 4 4 5 6 | |
4441 | ; 0 8 6 4 2 0 8 6 3 | |
4442 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4443 | ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| | |
4444 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4445 | ; | |
4446 | ; | |
4447 | ||
4448 | .align 5 | |
4449 | ||
4450 | hpfBldPTE64: | |
4451 | ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping | |
4452 | lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping | |
4453 | ||
4454 | mfsdr1 r27 ; Get the hash table base address | |
4455 | ||
4456 | sldi r11,r22,32 ; Slide top of adjusted EA over | |
4457 | sldi r14,r14,32 ; Slide top of VSID over | |
4458 | rlwinm r5,r27,0,27,31 ; Isolate the size | |
4459 | eqv r16,r16,r16 ; Get all foxes here | |
4460 | rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN | |
4461 | mr r2,r10 ; Save the flag part of the mapping | |
4462 | or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value | |
4463 | rldicr r27,r27,0,45 ; Clean up the hash table base | |
4464 | or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value | |
4465 | rlwinm r0,r11,0,4,19 ; Clear out everything but the page | |
4466 | subfic r5,r5,46 ; Get number of leading zeros | |
4467 | xor r19,r0,r15 ; Calculate hash | |
4468 | ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE | |
4469 | srd r16,r16,r5 ; Shift over to get length of table | |
4470 | srdi r19,r19,5 ; Convert page offset to hash table offset | |
4471 | rldicr r16,r16,0,56 ; Clean up lower bits in hash table size | |
4472 | rldicr r10,r10,0,51 ; Clear out flags | |
4473 | sldi r24,r24,12 ; Change ppnum to physical address | |
4474 | sub r11,r11,r10 ; Get the offset from the base mapping | |
4475 | and r19,r19,r16 ; Wrap into hash table | |
4476 | add r24,r24,r11 ; Get actual physical address of this page | |
4477 | srdi r20,r19,5 ; Convert PTEG offset to PCA offset | |
4478 | rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc. | |
4479 | subfic r20,r20,-4 ; Get negative offset to PCA | |
4480 | ori r24,r24,lo16(mpR) ; Force on the reference bit | |
4481 | add r20,r20,r27 ; Point to the PCA slot | |
4482 | add r19,r19,r27 ; Point to the PTEG | |
4483 | ||
4484 | ; | |
4485 | ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower. | |
4486 | ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA. | |
4487 | ; | |
4488 | ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible | |
4489 | ; that some other processor beat us and stuck in a PTE or that | |
4490 | ; all we had was a simple segment exception and the PTE was there the whole time. | |
4491 | ; If we find one (a valid pointer), we are done. | |
4492 | ; | |
4493 | ||
4494 | mr r7,r20 ; Copy the PCA pointer | |
4495 | bl mapLockPteg ; Lock the PTEG | |
4496 | ||
4497 | lwz r12,mpPte(r31) ; Get the offset to the PTE | |
4498 | mr r17,r6 ; Remember the PCA image | |
4499 | mr r18,r6 ; Prime post-selection PCA image | |
4500 | andi. r0,r12,mpHValid ; See if we have a PTE now | |
4501 | li r21,8 ; Get the number of slots | |
4502 | ||
4503 | bne-- cr7,hpfNoPte64 ; Skip this for a block mapping... | |
4504 | ||
4505 | bne-- hpfBailOut ; Someone already did this for us... | |
4506 | ||
4507 | ; | |
4508 | ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a | |
4509 | ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was | |
4510 | ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. | |
4511 | ; R4 returns the slot index. | |
4512 | ; | |
4513 | ; REMEMBER: CR7 indicates that we are building a block mapping. | |
4514 | ; | |
4515 | ||
4516 | hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots | |
4517 | mr r6,r17 ; Restore original state of PCA | |
4518 | rlwimi r6,r18,0,8,15 ; Insert the updated steal slot | |
4519 | blt- hpfBailOut ; Holy Cow, all slots are locked... | |
4520 | ||
4521 | bl mapSelSlot ; Go select a slot | |
4522 | ||
4523 | cmplwi cr5,r3,1 ; Did we steal a slot? | |
55e303ae | 4524 | mr r18,r6 ; Remember the PCA image after selection |
a3d08fcd | 4525 | insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address |
55e303ae A |
4526 | lwz r10,hwSteals(r2) ; Get the steal count |
4527 | blt++ cr5,hpfInser64 ; Nope, no steal... | |
4528 | ||
4529 | ld r6,0(r19) ; Get the old PTE | |
4530 | ld r7,8(r19) ; Get the real part of the stealee | |
4531 | rldicr r6,r6,0,62 ; Clear the valid bit | |
4532 | bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping... | |
4533 | srdi r3,r7,12 ; Change phys address to a ppnum | |
4534 | bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page) | |
4535 | cmplwi cr1,r3,0 ; Check if this is in RAM | |
4536 | bne-- hpfNoPte64 ; Could not get it, try for another... | |
4537 | ||
4538 | crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map | |
4539 | ||
4540 | hpfNipBMx: std r6,0(r19) ; Set the invalid PTE | |
4541 | li r9,tlbieLock ; Get the TLBIE lock | |
4542 | ||
4543 | srdi r11,r6,5 ; Shift VSID over for back hash | |
4544 | mfsprg r4,0 ; Get the per_proc | |
4545 | xor r11,r11,r19 ; Hash backwards to get low bits of VPN | |
4546 | sync ; Make sure the invalid is stored | |
4547 | ||
4548 | sldi r12,r6,16 ; Move AVPN to EA position | |
4549 | sldi r11,r11,5 ; Move this to the page position | |
4550 | ||
4551 | hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock | |
4552 | mr. r0,r0 ; Is it locked? | |
4553 | li r0,1 ; Get our lock word | |
4554 | bne-- hpfTLBIE65 ; It is locked, go wait... | |
4555 | ||
4556 | stwcx. r0,0,r9 ; Try to get it | |
4557 | rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN | |
4558 | rldicl r8,r6,52,50 ; Isolate the address space ID | |
4559 | bne-- hpfTLBIE64 ; We was beat... | |
4560 | addi r10,r10,1 ; Bump the steal count | |
4561 | ||
4562 | rldicl r11,r12,0,16 ; Clear cause the book says so | |
4563 | li r0,0 ; Lock clear value | |
4564 | ||
4565 | tlbie r11 ; Invalidate it everywhere | |
4566 | ||
55e303ae A |
4567 | mr r7,r8 ; Get a copy of the space ID |
4568 | eieio ; Make sure that the tlbie happens first | |
4569 | rldimi r7,r7,14,36 ; Copy address space to make hash value | |
4570 | tlbsync ; Wait for everyone to catch up | |
4571 | rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top | |
55e303ae A |
4572 | srdi r2,r6,26 ; Shift original segment down to bottom |
4573 | ||
4574 | ptesync ; Make sure of it all | |
4575 | xor r7,r7,r2 ; Compute original segment | |
91447636 | 4576 | stw r0,tlbieLock(0) ; Clear the tlbie lock |
55e303ae A |
4577 | |
4578 | stw r10,hwSteals(r4) ; Save the steal count | |
4579 | bgt cr5,hpfInser64 ; We just stole a block mapping... | |
4580 | ||
4581 | rldimi r12,r7,28,0 ; Insert decoded segment | |
4582 | rldicl r4,r12,0,13 ; Trim to max supported address | |
4583 | ||
4584 | ld r12,8(r19) ; Get the RC of the just invalidated PTE | |
4585 | ||
4586 | la r11,ppLink+4(r3) ; Point to the master RC copy | |
4587 | ld r7,ppLink(r3) ; Grab the pointer to the first mapping | |
4588 | rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC | |
4589 | ||
4590 | hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC | |
91447636 | 4591 | li r12,ppLFAmask ; Get mask to clean up alias pointer |
55e303ae | 4592 | or r0,r0,r2 ; Merge in the new RC |
91447636 | 4593 | rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F |
55e303ae A |
4594 | stwcx. r0,0,r11 ; Try to stick it back |
4595 | bne-- hpfMrgRC64 ; Try again if we collided... | |
4596 | ||
4597 | hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address | |
4598 | beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket... | |
4599 | ||
4600 | lhz r10,mpSpace(r7) ; Get the space | |
4601 | ld r9,mpVAddr(r7) ; And the vaddr | |
4602 | cmplw cr1,r10,r8 ; Is this one of ours? | |
4603 | xor r9,r4,r9 ; Compare virtual address | |
4604 | cmpldi r9,0x1000 ; See if we really match | |
4605 | crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match | |
4606 | beq++ hpfFPnch2x ; Yes, found ours... | |
4607 | ||
4608 | ld r7,mpAlias(r7) ; Chain on to the next | |
4609 | b hpfFPnchx ; Check it out... | |
4610 | ||
4611 | .align 5 | |
4612 | ||
4613 | hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area | |
4614 | stwcx. r7,0,r7 ; Kill reservation | |
4615 | ||
4616 | hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock | |
4617 | mr. r0,r0 ; Is it locked? | |
4618 | beq++ hpfTLBIE64 ; It is free now, go try to grab it again... | |
4619 | b hpfTLBIE63 ; Still locked, keep checking... | |
4620 | ||
4621 | ||
4622 | ||
4623 | hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG | |
4624 | stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though) | |
4625 | bl mapPhysUnlock ; Unlock the physent now | |
4626 | ||
4627 | ||
4628 | hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE | |
4629 | eieio ; Make sure this gets there first | |
4630 | std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid | |
4631 | mr r17,r18 ; Get the PCA image to set | |
4632 | b hpfFinish ; Go join the common exit code... | |
4633 | ||
4634 | hpfLostPhys: | |
4635 | lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead | |
4636 | ori r0,r0,lo16(Choke) ; System abend | |
4637 | sc | |
4638 | ||
4639 | ; | |
4640 | ; This is the common code we execute when we are finished setting up the PTE. | |
4641 | ; | |
4642 | ||
4643 | .align 5 | |
4644 | ||
4645 | hpfFinish: sub r4,r19,r27 ; Get offset of PTE | |
4646 | ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset | |
4647 | bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map | |
4648 | stw r4,mpPte(r31) ; Remember our PTE | |
4649 | ||
4650 | hpfBailOut: eieio ; Make sure all updates come first | |
4651 | stw r17,0(r20) ; Unlock and set the final PCA | |
4652 | ||
4653 | ; | |
4654 | ; This is where we go if we have started processing the fault, but find that someone | |
4655 | ; else has taken care of it. | |
4656 | ; | |
4657 | ||
4658 | hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags | |
4659 | rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag | |
4660 | sth r2,mpFlags+2(r31) ; Set it | |
4661 | ||
4662 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
4663 | bl sxlkUnlock ; Unlock the search list | |
4664 | ||
4665 | li r11,T_IN_VAIN ; Say that it was handled | |
4666 | b EXT(PFSExit) ; Leave... | |
4667 | ||
4668 | ; | |
4669 | ; This is where we go when we find that someone else | |
4670 | ; is in the process of handling the fault. | |
4671 | ; | |
4672 | ||
4673 | hpfAbandon: li r3,lgKillResv ; Kill off any reservation | |
4674 | stwcx. r3,0,r3 ; Do it | |
4675 | ||
4676 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
4677 | bl sxlkUnlock ; Unlock the search list | |
4678 | ||
4679 | li r11,T_IN_VAIN ; Say that it was handled | |
4680 | b EXT(PFSExit) ; Leave... | |
4681 | ||
91447636 A |
4682 | ; |
4683 | ; Guest shadow assist -- page fault handler | |
4684 | ; | |
4685 | ; Here we handle a fault in a guest pmap that has the guest shadow mapping | |
4686 | ; assist active. We locate the VMM pmap extension block, which contains an | |
4687 | ; index over the discontiguous multi-page shadow hash table. The index | |
4688 | ; corresponding to our vaddr is selected, and the selected group within | |
4689 | ; that page is searched for a valid and active entry that contains | |
4690 | ; our vaddr and space id. The search is pipelined, so that we may fetch | |
4691 | ; the next slot while examining the current slot for a hit. The final | |
4692 | ; search iteration is unrolled so that we don't fetch beyond the end of | |
4693 | ; our group, which could have dire consequences depending upon where the | |
4694 | ; physical hash page is located. | |
4695 | ; | |
4696 | ; The VMM pmap extension block occupies a page. Beginning at offset 0, we | |
4697 | ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary | |
4698 | ; after the pmap_vmm_ext is the hash table physical address index, a | |
4699 | ; linear list of 64-bit physical addresses of the pages that comprise | |
4700 | ; the hash table. | |
4701 | ; | |
4702 | ; In the event that we successfully locate a guest mapping, we re-join | |
4703 | ; the page fault path at hpfGVfound with the mapping's address in r31; | |
4704 | ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding | |
4705 | ; a share of the pmap search lock for the host pmap with the host pmap's | |
4706 | ; address in r28, the guest pmap's space id in r21, and the guest pmap's | |
4707 | ; flags in r12. | |
4708 | ; | |
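;
; Hedged C sketch of that search (GV_SLOTS, the slot layout, and the page
; indexing are assumptions shaped by the comments above, not the real
; shadow hash definitions):
;
;     #include <stdint.h>
;     #include <stddef.h>
;     #define GV_SLOTS 8                     // mappings per hash group, assumed
;
;     typedef struct {
;         uint32_t mpFlags;                  // mpgFree / mpgDormant live here
;         uint16_t mpSpace;
;         uint64_t mpVAddr;                  // low 12 bits carry flag bits
;     } gv_slot_t;                           // illustrative layout
;
;     static gv_slot_t *gv_lookup(gv_slot_t **hash_pages, uint32_t grps_per_pg,
;                                 uint32_t num_pages, uint32_t space,
;                                 uint64_t va, uint32_t skip)
;     {
;         uint32_t   hash = (uint32_t)(va >> 12) ^ space;  // spaceID ^ (va>>12)
;         uint32_t   grp  = hash % grps_per_pg;            // group within page
;         uint32_t   page = (hash / grps_per_pg) % num_pages; // which hash page
;         gv_slot_t *m    = hash_pages[page] + grp * GV_SLOTS;
;
;         for (int i = 0; i < GV_SLOTS; i++, m++) {        // scan the group
;             if (!(m->mpFlags & skip) &&                  // not free/dormant
;                 m->mpSpace == space &&                   // space ID matches
;                 (m->mpVAddr >> 12) == (va >> 12))        // vaddr matches
;                 return m;                                // hit
;         }
;         return NULL;                                     // miss
;     }
;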
4709 | ||
4710 | .align 5 | |
4711 | hpfGVxlate: | |
4712 | bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine | |
4713 | ||
4714 | lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr | |
4715 | lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags | |
4716 | lwz r21,pmapSpace(r28) ; r21 <- guest space ID number | |
4717 | lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr | |
4718 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
4719 | rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr | |
4720 | lwz r6,vxsGpf(r11) ; Get guest fault count | |
4721 | ||
4722 | srwi r3,r10,12 ; Form shadow hash: | |
4723 | xor r3,r3,r21 ; spaceID ^ (vaddr >> 12) | |
4724 | rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
4725 | ; Form index offset from hash page number | |
4726 | add r31,r31,r4 ; r31 <- hash page index entry | |
4727 | lwz r31,4(r31) ; r31 <- hash page paddr | |
4728 | rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK | |
4729 | ; r31 <- hash group paddr | |
4730 | ||
4731 | la r3,pmapSXlk(r28) ; Point to the host pmap's search lock | |
4732 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
4733 | mr. r3,r3 ; Did we get the lock? | |
4734 | bne- hpfBadLock ; Nope... | |
4735 | ||
4736 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
4737 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
4738 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
4739 | addi r6,r6,1 ; Increment guest fault count | |
4740 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
4741 | mtctr r0 ; in this group | |
4742 | stw r6,vxsGpf(r11) ; Update guest fault count | |
4743 | b hpfGVlp32 | |
4744 | ||
4745 | .align 5 | |
4746 | hpfGVlp32: | |
4747 | mr r6,r3 ; r6 <- current mapping slot's flags | |
4748 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
4749 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
4750 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
4751 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
4752 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
4753 | andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
4754 | xor r7,r7,r21 ; Compare space ID | |
4755 | or r0,r6,r7 ; r0 <- !(!free && !dormant && space match) | |
4756 | xor r8,r8,r10 ; Compare virtual address | |
4757 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4758 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4759 | ||
4760 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
4761 | bdnz hpfGVlp32 ; Iterate | |
4762 | ||
4763 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
4764 | andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag | |
4765 | xor r4,r4,r21 ; Compare space ID | |
4766 | or r0,r3,r4 ; r0 <- !(!free && !dormant && space match) | |
4767 | xor r5,r5,r10 ; Compare virtual address | |
4768 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4769 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4770 | ||
4771 | b hpfGVmiss | |
4772 | ||
4773 | .align 5 | |
4774 | hpfGV64: | |
4775 | ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr | |
4776 | lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags | |
4777 | lwz r21,pmapSpace(r28) ; r21 <- guest space ID number | |
4778 | ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr | |
4779 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
4780 | rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr | |
4781 | rldimi r10,r29,32,0 ; cleaning up low-order 12 bits | |
4782 | lwz r6,vxsGpf(r11) ; Get guest fault count | |
4783 | ||
4784 | srwi r3,r10,12 ; Form shadow hash: | |
4785 | xor r3,r3,r21 ; spaceID ^ (vaddr >> 12) | |
4786 | rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
4787 | ; Form index offset from hash page number | |
4788 | add r31,r31,r4 ; r31 <- hash page index entry | |
4789 | ld r31,0(r31) ; r31 <- hash page paddr | |
4790 | insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
4791 | ; r31 <- hash group paddr | |
4792 | ||
4793 | la r3,pmapSXlk(r28) ; Point to the host pmap's search lock | |
4794 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
4795 | mr. r3,r3 ; Did we get the lock? | |
4796 | bne-- hpfBadLock ; Nope... | |
55e303ae | 4797 | |
91447636 A |
4798 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags |
4799 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
4800 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
4801 | addi r6,r6,1 ; Increment guest fault count | |
4802 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
4803 | mtctr r0 ; in this group | |
4804 | stw r6,vxsGpf(r11) ; Update guest fault count | |
4805 | b hpfGVlp64 | |
4806 | ||
4807 | .align 5 | |
4808 | hpfGVlp64: | |
4809 | mr r6,r3 ; r6 <- current mapping slot's flags | |
4810 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
4811 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
4812 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
4813 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
4814 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
4815 | andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag | |
4816 | xor r7,r7,r21 ; Compare space ID | |
4817 | or r0,r6,r7 ; r0 <- !(!free && !dormant && space match) | |
4818 | xor r8,r8,r10 ; Compare virtual address | |
4819 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4820 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4821 | ||
4822 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
4823 | bdnz hpfGVlp64 ; Iterate | |
4824 | ||
4825 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
4826 | andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag | |
4827 | xor r4,r4,r21 ; Compare space ID | |
4828 | or r0,r3,r4 ; r0 <- !(!free && !dormant && space match) | |
4829 | xor r5,r5,r10 ; Compare virtual address | |
4830 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4831 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4832 | ||
4833 | hpfGVmiss: | |
4834 | lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count | |
4835 | addi r6,r6,1 ; Increment miss count | |
4836 | stw r6,vxsGpfMiss(r11) ; Update guest fault miss count | |
4837 | b hpfNotFound | |
55e303ae A |
4838 | |
4839 | /* | |
4840 | * hw_set_user_space(pmap) | |
4841 | * hw_set_user_space_dis(pmap) | |
4842 | * | |
4843 | * Indicate whether memory space needs to be switched. | |
4844 | * We really need to turn off interrupts here, because we need to be non-preemptable | |
de355530 A |
4845 | * |
4846 | * hw_set_user_space_dis is used when interruptions are already disabled. Mind the | |
4847 | * register usage here. The VMM switch code in vmachmon.s that calls this | |
4848 | * knows what registers are in use. Check there if these change. | |
4849 | */ | |
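;
; In C terms, both entry points do roughly the following (a sketch under
; assumed per_proc field names; virt_to_real and dssall_sync stand in for
; the pmapvr arithmetic and the AltiVec stream-kill sequence):
;
;     #include <stdint.h>
;
;     typedef struct pmap pmap_t;            // opaque here
;     struct per_proc {
;         pmap_t  *user_pmap_virt;           // stands in for ppUserPmapVirt
;         uint64_t user_pmap_phys;           // stands in for ppUserPmap
;     };
;
;     extern uint64_t virt_to_real(pmap_t *p);   // assumed helper
;     extern void     dssall_sync(void);         // dssall; sync
;
;     static void set_user_space(struct per_proc *pp, pmap_t *p, int altivec)
;     {
;         int same = (pp->user_pmap_virt == p);  // same address space as before?
;         pp->user_pmap_phys = virt_to_real(p);  // publish real pmap address
;         pp->user_pmap_virt = p;
;         if (!same && altivec)
;             dssall_sync();                     // kill stale data streams
;     }
;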
1c79356b | 4850 | |
1c79356b | 4851 | |
55e303ae A |
4852 | |
4853 | .align 5 | |
4854 | .globl EXT(hw_set_user_space) | |
4855 | ||
4856 | LEXT(hw_set_user_space) | |
4857 | ||
4858 | lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable | |
4859 | mfmsr r10 ; Get the current MSR | |
4860 | ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP | |
4861 | ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE | |
4862 | andc r10,r10,r8 ; Turn off VEC, FP for good | |
4863 | andc r9,r10,r9 ; Turn off EE also | |
4864 | mtmsr r9 ; Disable them | |
4865 | isync ; Make sure FP and vec are off | |
91447636 A |
4866 | mfsprg r6,1 ; Get the current activation |
4867 | lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block | |
55e303ae A |
4868 | lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address |
4869 | mfsprg r4,2 ; Get the feature flags | |
4870 | lwz r7,pmapvr(r3) ; Get the v to r translation | |
4871 | lwz r8,pmapvr+4(r3) ; Get the v to r translation | |
4872 | mtcrf 0x80,r4 ; Get the Altivec flag | |
4873 | xor r4,r3,r8 ; Get bottom of the real address of bmap anchor | |
4874 | cmplw cr1,r3,r2 ; Same address space as before? | |
4875 | stw r7,ppUserPmap(r6) ; Show our real pmap address | |
4876 | crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine | |
4877 | stw r4,ppUserPmap+4(r6) ; Show our real pmap address | |
4878 | stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address | |
4879 | mtmsr r10 ; Restore interruptions | |
4880 | beqlr-- cr1 ; Leave if the same address space or not Altivec | |
4881 | ||
4882 | dssall ; Need to kill all data streams if adrsp changed | |
4883 | sync | |
4884 | blr ; Return... | |
4885 | ||
4886 | .align 5 | |
4887 | .globl EXT(hw_set_user_space_dis) | |
4888 | ||
4889 | LEXT(hw_set_user_space_dis) | |
4890 | ||
4891 | lwz r7,pmapvr(r3) ; Get the v to r translation | |
4892 | mfsprg r4,2 ; Get the feature flags | |
4893 | lwz r8,pmapvr+4(r3) ; Get the v to r translation | |
91447636 A |
4894 | mfsprg r6,1 ; Get the current activation |
4895 | lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block | |
55e303ae A |
4896 | lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address |
4897 | mtcrf 0x80,r4 ; Get the Altivec flag | |
4898 | xor r4,r3,r8 ; Get bottom of the real address of bmap anchor | |
4899 | cmplw cr1,r3,r2 ; Same address space as before? | |
4900 | stw r7,ppUserPmap(r6) ; Show our real pmap address | |
4901 | crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine | |
4902 | stw r4,ppUserPmap+4(r6) ; Show our real pmap address | |
4903 | stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address | |
4904 | beqlr-- cr1 ; Leave if the same | |
4905 | ||
4906 | dssall ; Need to kill all data streams if adrsp changed | |
4907 | sync | |
4908 | blr ; Return... | |
4909 | ||
4910 | /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry | |
4911 | * | |
4912 | * Lock must already be held on mapping block list | |
4913 | * returns 0 if all slots filled. | |
4914 | * returns n if a slot is found and it is not the last | |
4915 | * returns -n if a slot is found and it is the last | |
4916 | * when n and -n are returned, the corresponding bit is cleared | |
4917 | * the mapping is zeroed out before return | |
4918 | * | |
4919 | */ | |
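As a sketch of the search below in C, treating the two 32-bit mbfree words as one 64-bit mask so that cntlzw becomes a leading-zero count; the assumption that slot 0 holds the block header (so n is never 0) is mine:

    #include <stdint.h>
    #include <string.h>

    static int mapalc1_model(uint64_t *mbfree, void *block)
    {
        if (*mbfree == 0)
            return 0;                                   /* all slots filled */
        int n = __builtin_clzll(*mbfree);               /* first free slot, MSB = 0 */
        *mbfree &= ~(0x8000000000000000ULL >> n);       /* claim it */
        memset((char *)block + (size_t)n * 64, 0, 64);  /* the dcbz pair: zero 64 bytes */
        return (*mbfree == 0) ? -n : n;                 /* negative when it was the last */
    }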
4920 | ||
4921 | .align 5 | |
4922 | .globl EXT(mapalc1) | |
4923 | ||
4924 | LEXT(mapalc1) | |
4925 | lwz r4,mbfree(r3) ; Get the 1st mask | |
4926 | lis r0,0x8000 ; Get the mask to clear the first free bit | |
4927 | lwz r5,mbfree+4(r3) ; Get the 2nd mask | |
4928 | mr r12,r3 ; Save the block ptr | |
4929 | cntlzw r3,r4 ; Get first 1-bit in 1st word | |
4930 | srw. r9,r0,r3 ; Get bit corresponding to first free one | |
4931 | cntlzw r10,r5 ; Get first free field in second word | |
4932 | andc r4,r4,r9 ; Turn 1-bit off in 1st word | |
4933 | bne mapalc1f ; Found one in 1st word | |
4934 | ||
4935 | srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word | |
4936 | li r3,0 ; assume failure return | |
4937 | andc r5,r5,r9 ; Turn it off | |
4938 | beqlr-- ; There are no 1 bits left... | |
4939 | addi r3,r10,32 ; set the correct number | |
4940 | ||
4941 | mapalc1f: | |
4942 | or. r0,r4,r5 ; any more bits set? | |
4943 | stw r4,mbfree(r12) ; update bitmasks | |
4944 | stw r5,mbfree+4(r12) | |
4945 | ||
4946 | slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block | |
4947 | addi r7,r6,32 | |
4948 | dcbz r6,r12 ; clear the 64-byte mapping | |
4949 | dcbz r7,r12 | |
4950 | ||
4951 | bnelr++ ; return if another bit remains set | |
4952 | ||
4953 | neg r3,r3 ; indicate we just returned the last bit | |
4954 | blr | |
4955 | ||
4956 | ||
4957 | /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry | |
4958 | * | |
4959 | * Lock must already be held on mapping block list | |
4960 | * returns 0 if all slots filled. | |
4961 | * returns n if a slot is found and it is not the last | |
4962 | * returns -n if a slot is found and it is the last | |
4963 | * when n and -n are returned, the corresponding bits are cleared | |
4964 | * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)). | |
4965 | * the mapping is zeroed out before return | |
4966 | */ | |
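The run-of-two trick reads nicely in C: a bit of (n & (n << 1)) is lit exactly where a free bit is immediately followed by another, so a leading-zero count finds the first 2-slot run. Using a single 64-bit word also makes the word-spanning case (mapalc2c below) fall out for free. A hedged model reusing the headers from the previous sketch, not the kernel routine itself:

    static int mapalc2_model(uint64_t *mbfree, void *block)
    {
        uint64_t runs = *mbfree & (*mbfree << 1);       /* bit n lit <=> bits n,n+1 free */
        if (runs == 0)
            return 0;                                   /* no adjacent free pair at all */
        int n = __builtin_clzll(runs);                  /* first bit of the run */
        *mbfree &= ~(0xC000000000000000ULL >> n);       /* claim both bits */
        memset((char *)block + (size_t)n * 64, 0, 128); /* the four dcbz ops */
        return (*mbfree == 0) ? -n : n;
    }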
4967 | ||
4968 | .align 5 | |
4969 | .globl EXT(mapalc2) | |
4970 | LEXT(mapalc2) | |
4971 | lwz r4,mbfree(r3) ; Get the first mask | |
4972 | lis r0,0x8000 ; Get the mask to clear the first free bit | |
4973 | lwz r5,mbfree+4(r3) ; Get the second mask | |
4974 | mr r12,r3 ; Save the block ptr | |
4975 | slwi r6,r4,1 ; shift first word over | |
4976 | and r6,r4,r6 ; lite start of double bit runs in 1st word | |
4977 | slwi r7,r5,1 ; shift 2nd word over | |
4978 | cntlzw r3,r6 ; Get first free 2-bit run in 1st word | |
4979 | and r7,r5,r7 ; lite start of double bit runs in 2nd word | |
4980 | srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word | |
4981 | cntlzw r10,r7 ; Get first free field in second word | |
4982 | srwi r11,r9,1 ; shift over for 2nd bit in 1st word | |
4983 | andc r4,r4,r9 ; Turn off 1st bit in 1st word | |
4984 | andc r4,r4,r11 ; turn off 2nd bit in 1st word | |
4985 | bne mapalc2a ; Found two consecutive free bits in 1st word | |
4986 | ||
4987 | srw. r9,r0,r10 ; Get bit corresponding to first free one in second word | |
4988 | li r3,0 ; assume failure | |
4989 | srwi r11,r9,1 ; get mask for 2nd bit | |
4990 | andc r5,r5,r9 ; Turn off 1st bit in 2nd word | |
4991 | andc r5,r5,r11 ; turn off 2nd bit in 2nd word | |
4992 | beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either | |
4993 | addi r3,r10,32 ; set the correct number | |
4994 | ||
4995 | mapalc2a: | |
4996 | or. r0,r4,r5 ; any more bits set? | |
4997 | stw r4,mbfree(r12) ; update bitmasks | |
4998 | stw r5,mbfree+4(r12) | |
4999 | slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block | |
5000 | addi r7,r6,32 | |
5001 | addi r8,r6,64 | |
5002 | addi r9,r6,96 | |
5003 | dcbz r6,r12 ; zero out the 128-byte mapping | |
5004 | dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines | |
5005 | dcbz r8,r12 ; because the mapping may not be 128-byte aligned | |
5006 | dcbz r9,r12 | |
5007 | ||
5008 | bnelr++ ; return if another bit remains set | |
5009 | ||
5010 | neg r3,r3 ; indicate we just returned the last bit | |
5011 | blr | |
5012 | ||
5013 | mapalc2c: | |
5014 | rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31 | |
5015 | and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free? | |
5016 | beqlr ; no, we failed | |
5017 | rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word | |
5018 | rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word | |
5019 | li r3,31 ; get index of this field | |
5020 | b mapalc2a | |
5021 | ||
5022 | ||
5023 | ; | |
5024 | ; This routine initializes the hash table and PCA. | |
5025 | ; It is done here because we may need to be 64-bit to do it. | |
5026 | ; | |
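A hedged C equivalent of the 32-bit path: zero the table, then fill one 32-bit PCA word per 64-byte PTEG, growing downward from the table base. 0xFF010000 is the "all 8 slots free, steal pointer at the end" pattern loaded into r4 below:

    #include <stdint.h>
    #include <string.h>

    static void hash_init_model(char *base, uint32_t size)  /* size is a power of two */
    {
        memset(base, 0, size);                   /* the dcbz loop over the hash table */
        uint32_t *pca = (uint32_t *)(void *)base;
        uint32_t ptegs = size / 64;              /* 64-byte PTEGs on 32-bit hardware */
        for (uint32_t i = 1; i <= ptegs; i++)
            pca[-(int32_t)i] = 0xFF010000u;      /* PCA entries run downward from base */
    }

The 64-bit path below is the same idea with 128-byte PTEGs and dcbz128.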
5027 | ||
5028 | .align 5 | |
5029 | .globl EXT(hw_hash_init) | |
5030 | ||
5031 | LEXT(hw_hash_init) | |
5032 | ||
5033 | mfsprg r10,2 ; Get feature flags | |
5034 | lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address | |
5035 | mtcrf 0x02,r10 ; move pf64Bit to cr6 | |
5036 | lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address | |
5037 | lis r4,0xFF01 ; Set all slots free and start steal at end | |
5038 | ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address | |
5039 | ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address | |
5040 | ||
5041 | lwz r12,0(r12) ; Get hash table size | |
5042 | li r3,0 ; Get start | |
5043 | bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint) | |
5044 | ||
5045 | lwz r11,4(r11) ; Get hash table base | |
5046 | ||
5047 | hhiNext32: cmplw r3,r12 ; Have we reached the end? | |
5048 | bge- hhiCPCA32 ; Yes... | |
5049 | dcbz r3,r11 ; Clear the line | |
5050 | addi r3,r3,32 ; Next one... | |
5051 | b hhiNext32 ; Go on... | |
5052 | ||
5053 | hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4 | |
5054 | li r3,-4 ; Displacement to first PCA entry | |
5055 | neg r12,r12 ; Get negative end of PCA | |
5056 | ||
5057 | hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry | |
5058 | subi r3,r3,4 ; Next slot | |
5059 | cmpw r3,r12 ; Have we finished? | |
5060 | bge+ hhiNPCA32 ; Not yet... | |
5061 | blr ; Leave... | |
5062 | ||
5063 | hhiSF: mfmsr r9 ; Save the MSR | |
5064 | li r8,1 ; Get a 1 | |
5065 | mr r0,r9 ; Get a copy of the MSR | |
5066 | ld r11,0(r11) ; Get hash table base | |
5067 | rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0) | |
5068 | mtmsrd r0 ; Turn on SF | |
5069 | isync | |
5070 | ||
5071 | ||
5072 | hhiNext64: cmpld r3,r12 ; Have we reached the end? | |
5073 | bge-- hhiCPCA64 ; Yes... | |
5074 | dcbz128 r3,r11 ; Clear the line | |
5075 | addi r3,r3,128 ; Next one... | |
5076 | b hhiNext64 ; Go on... | |
5077 | ||
5078 | hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4 | |
5079 | li r3,-4 ; Displacement to first PCA entry | |
5080 | neg r12,r12 ; Get negative end of PCA | |
5081 | ||
5082 | hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry | |
5083 | subi r3,r3,4 ; Next slot | |
5084 | cmpd r3,r12 ; Have we finished? | |
5085 | bge++ hhiNPCA64 ; Not yet... | |
5086 | ||
5087 | mtmsrd r9 ; Turn off SF if it was off | |
5088 | isync | |
5089 | blr ; Leave... | |
5090 | ||
5091 | ||
5092 | ; | |
5093 | ; This routine sets up the hardware to start translation. | |
5094 | ; Note that we do NOT start translation. | |
5095 | ; | |
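The two SDR1 images built below can be written out in C. The field layouts (HTABORG/HTABMASK on 32-bit, HTABSIZE on 64-bit) are from the PowerPC architecture books, and hash_table_size is assumed to be a power of two of at least 2^18 bytes:

    #include <stdint.h>

    static uint32_t sdr1_image_32(uint32_t base, uint32_t size)
    {
        return base | (((size - 1) >> 16) & 0x1FF);  /* HTABORG | HTABMASK (bits 23:31) */
    }

    static uint64_t sdr1_image_64(uint64_t base, uint32_t size)
    {
        return base | (uint64_t)(13 - __builtin_clz(size)); /* HTABSIZE = log2(size)-18 */
    }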
5096 | ||
5097 | .align 5 | |
5098 | .globl EXT(hw_setup_trans) | |
5099 | ||
5100 | LEXT(hw_setup_trans) | |
5101 | ||
5102 | mfsprg r11,0 ; Get the per_proc block | |
5103 | mfsprg r12,2 ; Get feature flags | |
5104 | li r0,0 ; Get a 0 | |
5105 | li r2,1 ; And a 1 | |
5106 | mtcrf 0x02,r12 ; Move pf64Bit to cr6 | |
5107 | stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid | |
5108 | stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux | |
5109 | sth r2,ppInvSeg(r11) ; Force a reload of the SRs | |
5110 | sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel | |
5111 | ||
5112 | bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint) | |
5113 | ||
5114 | li r9,0 ; Clear out a register | |
5115 | sync | |
5116 | isync | |
5117 | mtdbatu 0,r9 ; Invalidate maps | |
5118 | mtdbatl 0,r9 ; Invalidate maps | |
5119 | mtdbatu 1,r9 ; Invalidate maps | |
5120 | mtdbatl 1,r9 ; Invalidate maps | |
5121 | mtdbatu 2,r9 ; Invalidate maps | |
5122 | mtdbatl 2,r9 ; Invalidate maps | |
5123 | mtdbatu 3,r9 ; Invalidate maps | |
5124 | mtdbatl 3,r9 ; Invalidate maps | |
5125 | ||
5126 | mtibatu 0,r9 ; Invalidate maps | |
5127 | mtibatl 0,r9 ; Invalidate maps | |
5128 | mtibatu 1,r9 ; Invalidate maps | |
5129 | mtibatl 1,r9 ; Invalidate maps | |
5130 | mtibatu 2,r9 ; Invalidate maps | |
5131 | mtibatl 2,r9 ; Invalidate maps | |
5132 | mtibatu 3,r9 ; Invalidate maps | |
5133 | mtibatl 3,r9 ; Invalidate maps | |
5134 | ||
5135 | lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address | |
5136 | lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address | |
5137 | ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address | |
5138 | ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address | |
5139 | lwz r11,4(r11) ; Get hash table base | |
5140 | lwz r12,0(r12) ; Get hash table size | |
5141 | subi r12,r12,1 ; Back off by 1 | |
5142 | rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image | |
5143 | ||
5144 | mtsdr1 r11 ; Ok, we now have the hash table set up | |
5145 | sync | |
5146 | ||
5147 | li r12,invalSpace ; Get the invalid segment value | |
5148 | li r10,0 ; Start low | |
5149 | ||
5150 | hstsetsr: mtsrin r12,r10 ; Set the SR | |
5151 | addis r10,r10,0x1000 ; Bump the segment | |
5152 | mr. r10,r10 ; Are we finished? | |
5153 | bne+ hstsetsr ; Nope... | |
5154 | sync | |
5155 | blr ; Return... | |
5156 | ||
5157 | ; | |
5158 | ; 64-bit version | |
5159 | ; | |
5160 | ||
5161 | hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address | |
5162 | lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address | |
5163 | ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address | |
5164 | ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address | |
5165 | ld r11,0(r11) ; Get hash table base | |
5166 | lwz r12,0(r12) ; Get hash table size | |
5167 | cntlzw r10,r12 ; Get the number of bits | |
5168 | subfic r10,r10,13 ; Get the extra bits we need | |
5169 | or r11,r11,r10 ; Add the size field to SDR1 | |
5170 | ||
5171 | mtsdr1 r11 ; Ok, we now have the hash table set up | |
5172 | sync | |
5173 | ||
5174 | li r0,0 ; Set an SLB slot index of 0 | |
5175 | slbia ; Trash all SLB entries (except for entry 0 that is) | |
5176 | slbmfee r7,r0 ; Get the entry that is in SLB index 0 | |
5177 | rldicr r7,r7,0,35 ; Clear the valid bit and the rest | |
5178 | slbie r7 ; Invalidate it | |
5179 | ||
5180 | blr ; Return... | |
5181 | ||
5182 | ||
5183 | ; | |
5184 | ; This routine turns on translation for the first time on a processor | |
5185 | ; | |
5186 | ||
5187 | .align 5 | |
5188 | .globl EXT(hw_start_trans) | |
5189 | ||
5190 | LEXT(hw_start_trans) | |
5191 | ||
5192 | ||
5193 | mfmsr r10 ; Get the msr | |
5194 | ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation | |
5195 | ||
5196 | mtmsr r10 ; Everything falls apart here | |
5197 | isync | |
5198 | ||
5199 | blr ; Back to it. | |
5200 | ||
5201 | ||
5202 | ||
5203 | ; | |
5204 | ; This routine validates a segment register. | |
5205 | ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va) | |
5206 | ; | |
5207 | ; r3 = virtual pmap | |
5208 | ; r4 = segment[0:31] | |
5209 | ; r5 = segment[32:63] | |
5210 | ; r6 = va[0:31] | |
5211 | ; r7 = va[32:63] | |
5212 | ; | |
5213 | ; Note that we transform the addr64_t (long long) parameters into single 64-bit values. | |
5214 | ; Note that there is no reason to apply the key modifier here because this is only | |
5215 | ; used for kernel accesses. | |
5216 | ; | |
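Structurally, the VSID computation below replicates the space ID into several overlapping copies (each shifted 14 bits further left, per the SID hash diagrams earlier in the file) and combines that with the shifted segment number. A loose C sketch of the shape, with the exact bit alignments simplified relative to the listing:

    static uint64_t vsid_hash_model(uint64_t va, uint32_t space) /* space ID, <= 20 bits */
    {
        uint64_t seg = va >> 28;                     /* va[0:35], the segment number */
        uint64_t sid = 0;
        for (int i = 0; i < 4; i++)                  /* four overlapping space ID copies */
            sid |= (uint64_t)space << (14 * i);      /* ORed together */
        return (seg ^ sid) & 0x000FFFFFFFFFFFFFULL;  /* 52-bit VSID */
    }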
5217 | ||
5218 | .align 5 | |
5219 | .globl EXT(hw_map_seg) | |
5220 | ||
5221 | LEXT(hw_map_seg) | |
5222 | ||
5223 | lwz r0,pmapSpace(r3) ; Get the space, we will need it soon | |
5224 | lwz r9,pmapFlags(r3) ; Get the flags for the keys now | |
5225 | mfsprg r10,2 ; Get feature flags | |
5226 | |
5227 | ; | |
5228 | ; Note: the following code would probably be easier to follow if I split it, | |
5229 | ; but I just wanted to see if I could write this to work on both 32- and 64-bit | |
5230 | ; machines combined. | |
5231 | ; | |
5232 | ||
5233 | ; | |
5234 | ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines) | |
5235 | ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines) | |
5236 | ||
5237 | rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit | |
5238 | rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID | |
5239 | mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6 | |
5240 | srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest | |
5241 | rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35] | |
5242 | rlwimi r0,r0,14,4,17 ; Dup address space ID above itself | |
5243 | rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines) | |
5244 | rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half | |
5245 | rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32 | |
5246 | rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines) | |
5247 | ||
5248 | rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space | |
5249 | ; concatenated together. There is garbage | |
5250 | ; at the top for 64-bit but we will clean | |
5251 | ; that out later. | |
5252 | rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit | |
5253 | ||
5254 | ||
5255 | ; | |
5256 | ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or | |
5257 | ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines | |
5258 | ; | |
5259 | ||
5260 | ; | |
5261 | ; What we have now is: | |
5262 | ; | |
5263 | ; 0 0 1 2 3 4 4 5 6 | |
5264 | ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines | |
5265 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5266 | ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value | |
5267 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5268 | ; 0 0 1 2 3 - for 32-bit machines | |
5269 | ; 0 8 6 4 1 | |
5270 | ; | |
5271 | ; 0 0 1 2 3 4 4 5 6 | |
5272 | ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines | |
5273 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5274 | ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA | |
5275 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5276 | ; 0 0 1 2 3 - for 32-bit machines | |
5277 | ; 0 8 6 4 1 | |
5278 | ; | |
5279 | ; 0 0 1 2 3 4 4 5 6 | |
5280 | ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines | |
5281 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5282 | ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment | |
5283 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5284 | ; 0 0 1 2 3 - for 32-bit machines | |
5285 | ; 0 8 6 4 1 | |
5286 | ||
5287 | ||
5288 | xor r8,r8,r2 ; Calculate VSID | |
5289 | ||
5290 | bf-- pf64Bitb,hms32bit ; Skip out if 32-bit... | |
5291 | mfsprg r12,0 ; Get the per_proc |
5292 | li r0,1 ; Prepare to set bit 0 (also to clear EE) |
5293 | mfmsr r6 ; Get current MSR | |
5294 | li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits | |
5295 | mtmsrd r0,1 ; Set only the EE bit to 0 | |
5296 | rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on | |
5297 | mfmsr r11 ; Get the MSR right now, after disabling EE | |
5298 | andc r2,r11,r2 ; Turn off translation now | |
5299 | rldimi r2,r0,63,0 ; Get bit 64-bit turned on | |
5300 | or r11,r11,r6 ; Turn on the EE bit if it was on | |
5301 | mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on | |
5302 | isync ; Hang out a bit | |
5303 | ||
5304 | ld r6,validSegs(r12) ; Get the valid SLB entry flags | |
5305 | sldi r9,r9,9 ; Position the key and noex bit | |
5306 | ||
5307 | rldimi r5,r8,12,0 ; Form the VSID/key | |
5308 | ||
5309 | not r3,r6 ; Make valids be 0s | |
5310 | ||
5311 | cntlzd r7,r3 ; Find a free SLB | |
5312 | cmplwi r7,63 ; Did we find a free SLB entry? | |
5313 | ||
5314 | slbie r4 ; Since this ESID may still be in an SLBE, kill it | |
5315 | ||
5316 | oris r4,r4,0x0800 ; Turn on the valid bit in ESID | |
5317 | addi r7,r7,1 ; Make sure we skip slb 0 | |
5318 | blt++ hmsFreeSeg ; Yes, go load it... | |
5319 | ||
5320 | ; | |
5321 | ; No free SLB entries, select one that is in use and invalidate it | |
5322 | ; | |
5323 | lwz r2,ppSegSteal(r12) ; Get the next slot to steal | |
5324 | addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only | |
5325 | addi r2,r2,1 ; Set next slot to steal | |
5326 | slbmfee r3,r7 ; Get the entry that is in the selected spot | |
5327 | subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap | |
5328 | rldicr r3,r3,0,35 ; Clear the valid bit and the rest | |
5329 | srawi r8,r8,31 ; Get -1 if steal index still in range | |
5330 | slbie r3 ; Invalidate the in-use SLB entry | |
5331 | and r2,r2,r8 ; Reset steal index when it should wrap | |
5332 | isync ; | |
5333 | ||
5334 | stw r2,ppSegSteal(r12) ; Set the next slot to steal | |
5335 | ; | |
5336 | ; We are now ready to stick the SLB entry in the SLB and mark it in use | |
5337 | ; | |
5338 | ||
5339 | hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0 | |
5340 | rldimi r4,r7,0,58 ; Copy in the SLB entry selector | |
5341 | srd r0,r0,r2 ; Set bit mask for allocation | |
5342 | rldicl r5,r5,0,15 ; Clean out the unsupported bits | |
5343 | or r6,r6,r0 ; Turn on the allocation flag | |
5344 | ||
5345 | slbmte r5,r4 ; Make that SLB entry | |
5346 | ||
5347 | std r6,validSegs(r12) ; Mark as valid | |
5348 | mtmsrd r11 ; Restore the MSR | |
5349 | isync | |
5350 | blr ; Back to it... | |
5351 | ||
5352 | .align 5 | |
5353 | ||
5354 | hms32bit: |
5355 | mfsprg r12,1 ; Get the current activation | |
5356 | lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block | |
5357 | rlwinm r8,r8,0,8,31 ; Clean up the VSID | |
5358 | rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting |
5359 | lis r0,0x8000 ; Set bit 0 | |
5360 | rlwimi r8,r9,28,1,3 ; Insert the keys and N bit | |
5361 | srw r0,r0,r2 ; Get bit corresponding to SR | |
5362 | addi r7,r12,validSegs ; Point to the valid segment flags directly | |
5363 | ||
5364 | mtsrin r8,r4 ; Set the actual SR | |
5365 | isync ; Need to make sure this is done | |
5366 | ||
5367 | hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags | |
5368 | or r6,r6,r0 ; Show that SR is valid | |
5369 | stwcx. r6,0,r7 ; Set the valid SR flags | |
5370 | bne-- hmsrupt ; Had an interrupt, need to get flags again... | |
5371 | ||
5372 | blr ; Back to it... | |
5373 | ||
5374 | ||
5375 | ; | |
5376 | ; This routine invalidates a segment register. | |
5377 | ; | |
5378 | ||
5379 | .align 5 | |
5380 | .globl EXT(hw_blow_seg) | |
5381 | ||
5382 | LEXT(hw_blow_seg) | |
5383 | ||
5384 | mfsprg r10,2 ; Get feature flags | |
5385 | mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6 |
5386 | ||
5387 | rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean |
5388 | ||
5389 | bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit... | |
5390 | ||
5391 | li r0,1 ; Prepare to set bit 0 (also to clear EE) | |
5392 | mfmsr r6 ; Get current MSR | |
5393 | li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits | |
5394 | mtmsrd r0,1 ; Set only the EE bit to 0 | |
5395 | rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on | |
5396 | mfmsr r11 ; Get the MSR right now, after disabling EE | |
5397 | andc r2,r11,r2 ; Turn off translation now | |
5398 | rldimi r2,r0,63,0 ; Get bit 64-bit turned on | |
5399 | or r11,r11,r6 ; Turn on the EE bit if it was on | |
5400 | mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on | |
5401 | isync ; Hang out a bit | |
5402 | ||
5403 | rldimi r9,r3,32,0 ; Insert the top part of the ESID | |
5404 | ||
5405 | slbie r9 ; Invalidate the associated SLB entry | |
5406 | ||
5407 | mtmsrd r11 ; Restore the MSR | |
5408 | isync | |
5409 | blr ; Back to it. | |
5410 | ||
5411 | .align 5 | |
5412 | ||
5413 | hbs32bit: |
5414 | mfsprg r12,1 ; Get the current activation | |
5415 | lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block | |
5416 | addi r7,r12,validSegs ; Point to the valid segment flags directly | |
5417 | lwarx r4,0,r7 ; Get and reserve the valid segment flags | |
5418 | rlwinm r6,r9,4,28,31 ; Convert segment to number |
5419 | lis r2,0x8000 ; Set up a mask | |
5420 | srw r2,r2,r6 ; Make a mask | |
5421 | and. r0,r4,r2 ; See if this is even valid | |
5422 | li r5,invalSpace ; Set the invalid address space VSID | |
5423 | beqlr ; Leave if already invalid... | |
5424 | ||
5425 | mtsrin r5,r9 ; Slam the segment register | |
5426 | isync ; Need to make sure this is done | |
5427 | ||
5428 | hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment | |
5429 | stwcx. r4,0,r7 ; Set the valid SR flags | |
5430 | beqlr++ ; Stored ok, no interrupt, time to leave... | |
5431 | ||
5432 | lwarx r4,0,r7 ; Get and reserve the valid segment flags again | |
5433 | b hbsrupt ; Try again... | |
5434 | ||
5435 | ; | |
5436 | ; This routine invalidates the entire pmap segment cache | |
5437 | ; | |
5438 | ; Translation is on, interrupts may or may not be enabled. | |
5439 | ; | |
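The lwarx/stwcx. sequence below is the classic load-reserve/store-conditional loop. Rendered with C11 atomics it looks roughly like this, where lock_bit stands in for the pmapCCtlLck mask and the top 16 bits of pmapCCtl are assumed to be the per-slot invalid bits:

    #include <stdatomic.h>
    #include <stdint.h>

    static void invalidate_segs_model(_Atomic uint32_t *pmapCCtl, uint32_t lock_bit)
    {
        for (;;) {
            uint32_t old = atomic_load(pmapCCtl);
            if (old & lock_bit)
                continue;                         /* locked: spin until free (isInv1) */
            uint32_t val = old | 0xFFFF0000u;     /* slam in all 16 invalid bits */
            if (atomic_compare_exchange_weak(pmapCCtl, &old, val))
                return;                           /* store-conditional succeeded */
        }
    }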
5440 | ||
5441 | .align 5 | |
5442 | .globl EXT(invalidateSegs) | |
5443 | ||
5444 | LEXT(invalidateSegs) | |
5445 | ||
5446 | la r10,pmapCCtl(r3) ; Point to the segment cache control | |
5447 | eqv r2,r2,r2 ; Get all foxes | |
5448 | ||
5449 | isInv: lwarx r4,0,r10 ; Get the segment cache control value | |
5450 | rlwimi r4,r2,0,0,15 ; Slam in all invalid bits | |
5451 | rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? | |
5452 | bne-- isInv0 ; Yes, try again... | |
5453 | ||
5454 | stwcx. r4,0,r10 ; Try to invalidate it | |
5455 | bne-- isInv ; Someone else just stuffed it... | |
5456 | blr ; Leave... | |
5457 | ||
5458 | ||
5459 | isInv0: li r4,lgKillResv ; Get reservation kill zone | |
5460 | stwcx. r4,0,r4 ; Kill reservation | |
5461 | ||
5462 | isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control | |
5463 | rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? | |
5464 | bne-- isInv1 ; Yes, still locked, keep spinning... | |
5465 | b isInv ; Unlocked now, go try to grab it again... | |
5466 | ||
5467 | ; | |
5468 | ; This routine switches segment registers between kernel and user. | |
5469 | ; We have some assumptions and rules: | |
5470 | ; We are in the exception vectors | |
5471 | ; pf64Bitb is set up | |
5472 | ; R3 contains the MSR we are going to | |
5473 | ; We cannot use R4, R13, R20, R21, R25, R26, R29 |
5474 | ; R13 is the savearea |
5475 | ; R29 has the per_proc | |
5476 | ; | |
5477 | ; We return R3 as 0 if we did not switch between kernel and user | |
5478 | ; We also maintain and apply the user state key modifier used by VMM support; | |
5479 | ; If we go to the kernel it is set to 0, otherwise it follows the bit | |
5480 | ; in spcFlags. | |
5481 | ; | |
5482 | ||
5483 | .align 5 |
5484 | .globl EXT(switchSegs) |
5485 |
5486 | LEXT(switchSegs) |
5487 | ||
5488 | lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator) | |
5489 | lwz r9,spcFlags(r29) ; Pick up the special user state flags | |
5490 | rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit | |
5491 | rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit | |
5492 | lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel | |
5493 | or r2,r2,r3 ; This will be 1 if we will be using user segments | |
5494 | li r3,0 ; Get a selection mask | |
5495 | cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg | |
5496 | ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address) | |
5497 | sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user | |
5498 | la r19,ppUserPmap(r29) ; Point to the current user pmap | |
5499 | ||
5500 | ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice | |
5501 | rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key | |
5502 | ||
5503 | andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise | |
5504 | and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise | |
5505 | and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise | |
5506 | or r8,r8,r19 ; Get the pointer to the pmap we are using | |
5507 | ||
5508 | beqlr ; We are staying in the same mode, do not touch segs... | |
5509 | ||
5510 | lwz r28,0(r8) ; Get top half of pmap address | |
5511 | lwz r10,4(r8) ; Get bottom half | |
5512 | ||
5513 | stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg | |
5514 | rlwinm r28,r28,0,1,0 ; Copy top to top | |
5515 | stw r30,ppMapFlags(r29) ; Set the key modifier | |
5516 | rlwimi r28,r10,0,0,31 ; Insert bottom | |
5517 | ||
5518 | la r10,pmapCCtl(r28) ; Point to the segment cache control | |
5519 | la r9,pmapSegCache(r28) ; Point to the segment cache | |
5520 | ||
5521 | ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control | |
5522 | rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock? | |
5523 | ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit | |
5524 | bne-- ssgLock0 ; Yup, this is in use... | |
5525 | ||
5526 | stwcx. r16,0,r10 ; Try to set the lock | |
5527 | bne-- ssgLock ; Did we get contention? | |
5528 | ||
5529 | not r11,r15 ; Invert the invalids to valids | |
5530 | li r17,0 ; Set a mask for the SRs we are loading | |
5531 | isync ; Make sure we are all caught up | |
5532 | ||
5533 | bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it... | |
5534 | ||
5535 | li r0,0 ; Clear | |
5536 | slbia ; Trash all SLB entries (except for entry 0 that is) | |
5537 | li r17,1 ; Get SLB index to load (skip slb 0) | |
5538 | oris r0,r0,0x8000 ; Get set for a mask | |
5539 | b ssg64Enter ; Start on a cache line... | |
5540 | |
5541 | .align 5 | |
5542 |
5543 | ssgLock0: li r15,lgKillResv ; Killing field |
5544 | stwcx. r15,0,r15 ; Kill reservation | |
5545 |
5546 | ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls |
5547 | rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock? | |
5548 | beq++ ssgLock ; Nope, free now, go try to grab it... | |
5549 | b ssgLock1 ; Yup, still in use, keep waiting... | |
5550 | ; | |
5551 | ; This is the 32-bit address space switch code. | |
5552 | ; We take a reservation on the segment cache and walk through. | |
5553 | ; For each entry, we load the specified entries and remember which | |
5554 | ; we did with a mask. Then, we figure out which segments should be | |
5555 | ; invalid and then see which actually are. Then we load those with the | |
5556 | ; defined invalid VSID. | |
5557 | ; Afterwards, we unlock the segment cache. | |
5558 | ; | |
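A compilable sketch of that walk, with the cache entry layout (sgcESID, sgcVSID) taken from the listing and mtsrin stubbed out; the key-modifier XOR and the key/no-execute formatting of the SR image are omitted here:

    #include <stdint.h>

    struct sgc_entry_model { uint64_t sgcESID, sgcVSID; };   /* names per the listing */

    extern void mtsrin_stub(uint32_t sr_image, uint32_t ea); /* stands in for mtsrin */
    #define INVAL_VSID_MODEL 1u                              /* placeholder invalSpace */

    static uint32_t switch_segs32_model(struct sgc_entry_model *cache,
                                        uint32_t valid_slots, /* inverted pmapCCtl */
                                        uint32_t was_valid)   /* validSegs on entry */
    {
        uint32_t loaded = 0;                                  /* r17: SRs loaded now */
        while (valid_slots) {
            int slot = __builtin_clz(valid_slots);            /* next slot in use */
            valid_slots &= ~(0x80000000u >> slot);
            uint32_t esid = (uint32_t)cache[slot].sgcESID;
            uint32_t sr   = esid >> 28;                       /* segment number 0..15 */
            mtsrin_stub((uint32_t)cache[slot].sgcVSID, esid); /* load the SR */
            loaded |= 0x80000000u >> sr;                      /* remember we did it */
        }
        uint32_t stale = was_valid & ~loaded;                 /* valid before, not now */
        while (stale) {
            int sr = __builtin_clz(stale);
            stale &= ~(0x80000000u >> sr);
            mtsrin_stub(INVAL_VSID_MODEL, (uint32_t)sr << 28); /* invalidate that SR */
        }
        return loaded;                                        /* new validSegs value */
    }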
5559 |
5560 | .align 5 |
5561 | ||
5562 | ssg32Enter: cntlzw r12,r11 ; Find the next slot in use | |
5563 | cmplwi r12,pmapSegCacheUse ; See if we are done | |
5564 | slwi r14,r12,4 ; Index to the cache slot | |
5565 | lis r0,0x8000 ; Get set for a mask | |
5566 | add r14,r14,r9 ; Point to the entry | |
5567 | ||
5568 | bge- ssg32Done ; All done... | |
5569 | ||
5570 | lwz r5,sgcESID+4(r14) ; Get the ESID part | |
5571 | srw r2,r0,r12 ; Form a mask for the one we are loading | |
5572 | lwz r7,sgcVSID+4(r14) ; And get the VSID bottom | |
5573 | ||
5574 | andc r11,r11,r2 ; Clear the bit | |
5575 | lwz r6,sgcVSID(r14) ; And get the VSID top | |
5576 | ||
5577 | rlwinm r2,r5,4,28,31 ; Change the segment number to a number | |
5578 | ||
5579 | xor r7,r7,r30 ; Modify the key before we actually set it | |
5580 | srw r0,r0,r2 ; Get a mask for the SR we are loading | |
5581 | rlwinm r8,r7,19,1,3 ; Insert the keys and N bit | |
5582 | or r17,r17,r0 ; Remember the segment | |
5583 | rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID | |
5584 | rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents | |
5585 | ||
5586 | mtsrin r8,r5 ; Load the segment | |
5587 | b ssg32Enter ; Go enter the next... | |
5588 | ||
5589 | .align 5 | |
5590 | ||
5591 | ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags | |
5592 | stw r15,pmapCCtl(r28) ; Unlock the segment cache controls | |
5593 | ||
5594 | lis r0,0x8000 ; Get set for a mask | |
5595 | li r2,invalSpace ; Set the invalid address space VSID | |
5596 | ||
5597 | nop ; Align loop | |
5598 | nop ; Align loop | |
5599 | andc r16,r16,r17 ; Get list of SRs that were valid before but not now | |
5600 | nop ; Align loop | |
5601 | ||
5602 | ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate | |
5603 | cmplwi r18,16 ; Have we finished? | |
5604 | srw r22,r0,r18 ; Get the mask bit | |
5605 | rlwinm r23,r18,28,0,3 ; Get the segment register we need | |
5606 | andc r16,r16,r22 ; Get rid of the guy we just did | |
5607 | bge ssg32Really ; Yes, we are really done now... | |
5608 | ||
5609 | mtsrin r2,r23 ; Invalidate the SR | |
5610 | b ssg32Inval ; Do the next... | |
5611 | ||
5612 | .align 5 | |
5613 | ||
5614 | ssg32Really: | |
5615 | stw r17,validSegs(r29) ; Set the valid SR flags | |
5616 | li r3,1 ; Set kernel/user transition | |
5617 | blr | |
5618 | ||
5619 | ; | |
5620 | ; This is the 64-bit address space switch code. | |
5621 | ; First we blow away all of the SLB entries. | |
5622 | ; Then we walk through the segment cache, | |
5623 | ; loading the SLB. Afterwards, we release the cache lock. | |
5624 | ; | |
5625 | ; Note that because we have to treat SLBE 0 specially, we do not ever use it... | |
5626 | ; It's a performance thing... | |
5627 | ; | |
5628 | |
5629 | .align 5 | |
5630 |
5631 | ssg64Enter: cntlzw r12,r11 ; Find the next slot in use |
5632 | cmplwi r12,pmapSegCacheUse ; See if we are done | |
5633 | slwi r14,r12,4 ; Index to the cache slot | |
5634 | srw r16,r0,r12 ; Form a mask for the one we are loading | |
5635 | add r14,r14,r9 ; Point to the entry | |
5636 | andc r11,r11,r16 ; Clear the bit | |
5637 | bge-- ssg64Done ; All done... | |
5638 | ||
5639 | ld r5,sgcESID(r14) ; Get the ESID part | |
5640 | ld r6,sgcVSID(r14) ; And get the VSID part | |
5641 | oris r5,r5,0x0800 ; Turn on the valid bit | |
5642 | or r5,r5,r17 ; Insert the SLB slot | |
5643 | xor r6,r6,r30 ; Modify the key before we actually set it | |
5644 | addi r17,r17,1 ; Bump to the next slot | |
5645 | slbmte r6,r5 ; Make that SLB entry | |
5646 | b ssg64Enter ; Go enter the next... | |
5647 |
5648 | .align 5 |
5649 |
5650 | ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls |
5651 |
5652 | eqv r16,r16,r16 ; Load up with all foxes |
5653 | subfic r17,r17,64 ; Get the number of 1 bits we need | |
5654 | ||
5655 | sld r16,r16,r17 ; Get a mask for the used SLB entries | |
5656 | li r3,1 ; Set kernel/user transition | |
5657 | std r16,validSegs(r29) ; Set the valid SR flags | |
5658 | blr |
5659 | ||
5660 | ; |
5661 | ; mapSetUp - this function sets initial state for all mapping functions. | |
5662 | ; We turn off all translations (physical), disable interruptions, and | |
5663 | ; enter 64-bit mode if applicable. | |
5664 | ; | |
5665 | ; We also return the original MSR in r11, the feature flags in R12, | |
5666 | ; and CR6 set up so we can do easy branches for 64-bit. | |
5667 | ; hw_clear_maps assumes r10, r9 will not be trashed. |
5668 | ; |
5669 | ||
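In C terms, mapSetUp does roughly the following; the MSR bit masks are assumed values (the real names come from proc_reg.h) and mtmsr_model stands in for the mtmsr/mtmsrd-plus-isync pair:

    #include <stdint.h>

    #define MSR_EE_M  0x00008000ull   /* assumed masks, not the proc_reg.h names */
    #define MSR_IR_M  0x00000020ull
    #define MSR_DR_M  0x00000010ull
    #define MSR_FP_M  0x00002000ull
    #define MSR_VEC_M 0x02000000ull
    #define MSR_SF_M  0x8000000000000000ull

    extern void mtmsr_model(uint64_t msr);   /* stub: mtmsr/mtmsrd followed by isync */

    static uint64_t map_setup_model(uint64_t msr, int is64bit)
    {
        msr &= ~(MSR_VEC_M | MSR_FP_M);                  /* VEC and FP off for good */
        uint64_t phys = msr & ~(MSR_EE_M | MSR_IR_M | MSR_DR_M);
        if (is64bit)
            phys |= MSR_SF_M;                            /* enter 64-bit mode */
        mtmsr_model(phys);                               /* physical, interrupts off */
        return msr;                                      /* original MSR, as in r11 */
    }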
5670 | .align 5 | |
5671 | .globl EXT(mapSetUp) | |
5672 | ||
5673 | LEXT(mapSetUp) | |
5674 | ||
5675 | lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask | |
5676 | mfsprg r12,2 ; Get feature flags | |
5677 | ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well | |
5678 | mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6 | |
5679 | mfmsr r11 ; Save the MSR | |
5680 | mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6 | |
5681 | andc r11,r11,r0 ; Clear VEC and FP for good | |
5682 | ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR | |
5683 | li r2,1 ; Prepare for 64 bit | |
5684 | andc r0,r11,r0 ; Clear the rest | |
5685 | bt pfNoMSRirb,msuNoMSR ; No MSR... | |
5686 | bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint) | |
d7e50217 | 5687 | |
5688 | mtmsr r0 ; Translation and all off |
5689 | isync ; Toss prefetch | |
5690 | blr ; Return... | |
5691 | ||
5692 | .align 5 | |
5693 | ||
5694 | msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0) | |
5695 | mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR | |
5696 | isync ; synchronize | |
5697 | blr ; Return... | |
5698 | ||
5699 | .align 5 | |
5700 | ||
5701 | msuNoMSR: mr r2,r3 ; Save R3 across call | |
5702 | mr r3,r0 ; Get the new MSR value | |
5703 | li r0,loadMSR ; Get the MSR setter SC | |
5704 | sc ; Set it | |
5705 | mr r3,r2 ; Restore R3 | |
5706 | blr ; Go back all set up... | |
5707 | ||
5708 | ||
5709 | ; |
5710 | ; Guest shadow assist -- remove all guest mappings | |
5711 | ; | |
5712 | ; Remove all mappings for a guest pmap from the shadow hash table. | |
5713 | ; | |
5714 | ; Parameters: | |
5715 | ; r3 : address of pmap, 32-bit kernel virtual address | |
5716 | ; | |
5717 | ; Non-volatile register usage: | |
5718 | ; r24 : host pmap's physical address | |
5719 | ; r25 : VMM extension block's physical address | |
5720 | ; r26 : physent address | |
5721 | ; r27 : guest pmap's space ID number | |
5722 | ; r28 : current hash table page index | |
5723 | ; r29 : guest pmap's physical address | |
5724 | ; r30 : saved msr image | |
5725 | ; r31 : current mapping | |
5726 | ; | |
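The overall shape of the scan, as a hedged C sketch: walk every slot of every shadow hash page, and any non-free slot whose space ID matches the guest's gets its PTE disconnected (unless dormant), is unchained from its physent, and is marked free. The struct layout, flag values, and helpers below are assumptions standing in for the listing's mapInvPte32/64 and physent paths:

    #include <stddef.h>
    #include <stdint.h>

    struct gv_mapping_model {          /* guessed subset of the mapping layout */
        uint32_t mpFlags;
        uint16_t mpSpace;
    };

    #define MPG_FREE_M    0x80000000u  /* placeholder flag bits */
    #define MPG_DORMANT_M 0x40000000u
    #define MPG_GLOBAL_M  0x20000000u
    #define MPG_ALL_M     0xF0000000u

    extern char *gv_hash_page(int page);                       /* VMX_HPIDX_OFFSET lookup */
    extern void gv_disconnect_pte(struct gv_mapping_model *);  /* mapInvPte32/64 path */
    extern void gv_unchain_physent(struct gv_mapping_model *); /* graRemPhys..graRemoved */

    static void rem_all_gv_model(uint32_t guest_space, int pages,
                                 int slots_per_page, size_t slot_sz)
    {
        for (int pg = 0; pg < pages; pg++) {
            char *p = gv_hash_page(pg);
            for (int i = 0; i < slots_per_page; i++, p += slot_sz) {
                struct gv_mapping_model *mp = (struct gv_mapping_model *)(void *)p;
                if ((mp->mpFlags & MPG_FREE_M) || mp->mpSpace != guest_space)
                    continue;                             /* not one of ours, skip */
                if (!(mp->mpFlags & MPG_DORMANT_M))
                    gv_disconnect_pte(mp);                /* active: kill its PTE */
                gv_unchain_physent(mp);                   /* drop from physent chain */
                mp->mpFlags = (mp->mpFlags & ~MPG_ALL_M) | MPG_FREE_M;
            }
        }
    }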
5727 | .align 5 | |
5728 | .globl EXT(hw_rem_all_gv) | |
5729 | ||
5730 | LEXT(hw_rem_all_gv) | |
5731 | ||
5732 | #define graStackSize ((31-24+1)*4)+4 | |
5733 | stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1) | |
5734 | ; Mint a new stack frame | |
5735 | mflr r0 ; Get caller's return address | |
5736 | mfsprg r11,2 ; Get feature flags | |
5737 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
5738 | stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
5739 | ; Save caller's return address | |
5740 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
5741 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
5742 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
5743 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
5744 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
5745 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
5746 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
5747 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
5748 | ||
5749 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
5750 | ||
5751 | bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine | |
5752 | lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr | |
5753 | lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt | |
5754 | lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr | |
5755 | b graStart ; Get to it | |
5756 | gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr | |
5757 | ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt | |
5758 | ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr | |
5759 | graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode | |
5760 | xor r29,r3,r9 ; Convert pmap_t virt->real | |
5761 | mr r30,r11 ; Save caller's msr image | |
5762 | ||
5763 | la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
5764 | bl sxlkExclusive ; Get lock exclusive | |
5765 | ||
5766 | lwz r3,vxsGra(r25) ; Get remove all count | |
5767 | addi r3,r3,1 ; Increment remove all count | |
5768 | stw r3,vxsGra(r25) ; Update remove all count | |
5769 | ||
5770 | li r28,0 ; r28 <- first hash page table index to search | |
5771 | lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number | |
5772 | graPgLoop: | |
5773 | la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index | |
5774 | rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK | |
5775 | ; Convert page index into page physical index offset | |
5776 | add r31,r31,r11 ; Calculate page physical index entry address | |
5777 | bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit | |
5778 | lwz r31,4(r31) ; r31 <- first slot in hash table page to examine | |
5779 | b graLoop ; Examine all slots in this page | |
5780 | gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine | |
5781 | b graLoop ; Examine all slots in this page | |
5782 | ||
5783 | .align 5 | |
5784 | graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags | |
5785 | lhz r4,mpSpace(r31) ; Get mapping's space ID number | |
5786 | rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag | |
5787 | xor r4,r4,r27 ; Compare space ID number | |
5788 | or. r0,r6,r4 ; cr0_eq <- !free && space id match | |
5789 | bne graMiss ; Not one of ours, skip it | |
5790 | ||
5791 | lwz r11,vxsGraHits(r25) ; Get remove hit count | |
5792 | addi r11,r11,1 ; Increment remove hit count | |
5793 | stw r11,vxsGraHits(r25) ; Update remove hit count | |
5794 | ||
5795 | rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant? | |
5796 | bne graRemPhys ; Yes, nothing to disconnect | |
5797 | ||
5798 | lwz r11,vxsGraActive(r25) ; Get remove active count | |
5799 | addi r11,r11,1 ; Increment remove active count | |
5800 | stw r11,vxsGraActive(r25) ; Update remove active count | |
5801 | ||
5802 | bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately | |
5803 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
5804 | ; r31 <- mapping's physical address | |
5805 | ; r3 -> PTE slot physical address | |
5806 | ; r4 -> High-order 32 bits of PTE | |
5807 | ; r5 -> Low-order 32 bits of PTE | |
5808 | ; r6 -> PCA | |
5809 | ; r7 -> PCA physical address | |
5810 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
5811 | b graFreePTE ; Join 64-bit path to release the PTE | |
5812 | graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
5813 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
5814 | graFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
5815 | beq- graRemPhys ; No valid PTE, we're almost done | |
5816 | lis r0,0x8000 ; Prepare free bit for this slot | |
5817 | srw r0,r0,r2 ; Position free bit | |
5818 | or r6,r6,r0 ; Set it in our PCA image | |
5819 | lwz r8,mpPte(r31) ; Get PTE pointer | |
5820 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
5821 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
5822 | eieio ; Synchronize all previous updates (mapInvPtexx doesn't) | |
5823 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
5824 | ||
5825 | graRemPhys: | |
5826 | lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number | |
5827 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
5828 | mr. r26,r3 ; Got lock on our physent? | |
5829 | beq-- graBadPLock ; No, time to bail out | |
5830 | ||
5831 | crset cr1_eq ; cr1_eq <- previous link is the anchor | |
5832 | bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine | |
5833 | la r11,ppLink+4(r26) ; Point to chain anchor | |
5834 | lwz r9,ppLink+4(r26) ; Get chain anchor | |
5835 | rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer | |
5836 | ||
5837 | graRemLoop: beq- graRemoveMiss ; End of chain, this is not good | |
5838 | cmplw r9,r31 ; Is this the mapping to remove? | |
5839 | lwz r8,mpAlias+4(r9) ; Get forward chain pointer | |
5840 | bne graRemNext ; No, chain onward | |
5841 | bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor | |
5842 | stw r8,0(r11) ; Unchain gpv->phys mapping | |
5843 | b graRemoved ; Exit loop | |
5844 | graRemRetry: | |
5845 | lwarx r0,0,r11 ; Get previous link | |
5846 | rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags | |
5847 | stwcx. r0,0,r11 ; Update previous link | |
5848 | bne- graRemRetry ; Lost reservation, retry | |
5849 | b graRemoved ; Good work, let's get outta here | |
5850 | ||
5851 | graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link | |
5852 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
5853 | mr. r9,r8 ; Does next entry exist? | |
5854 | b graRemLoop ; Carry on | |
5855 | ||
5856 | graRemove64: | |
5857 | li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
5858 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
5859 | la r11,ppLink(r26) ; Point to chain anchor | |
5860 | ld r9,ppLink(r26) ; Get chain anchor | |
5861 | andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer | |
5862 | graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good | |
5863 | cmpld r9,r31 ; Is this the mapping to remove? | |
5864 | ld r8,mpAlias(r9) ; Get forward chain pointer | |
5865 | bne graRem64Nxt ; Not mapping to remove, chain on, dude | |
5866 | bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor | |
5867 | std r8,0(r11) ; Unchain gpv->phys mapping | |
5868 | b graRemoved ; Exit loop | |
5869 | graRem64Rt: ldarx r0,0,r11 ; Get previous link | |
5870 | and r0,r0,r7 ; Get flags | |
5871 | or r0,r0,r8 ; Insert new forward pointer | |
5872 | stdcx. r0,0,r11 ; Slam it back in | |
5873 | bne-- graRem64Rt ; Lost reservation, retry | |
5874 | b graRemoved ; Good work, let's go home | |
5875 | ||
5876 | graRem64Nxt: | |
5877 | la r11,mpAlias(r9) ; Point to (soon to be) previous link | |
5878 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
5879 | mr. r9,r8 ; Does next entry exist? | |
5880 | b graRem64Lp ; Carry on | |
5881 | ||
5882 | graRemoved: | |
5883 | mr r3,r26 ; r3 <- physent's address | |
5884 | bl mapPhysUnlock ; Unlock the physent (and its chain of mappings) | |
5885 | ||
5886 | lwz r3,mpFlags(r31) ; Get mapping's flags | |
5887 | rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags | |
5888 | ori r3,r3,mpgFree ; Mark mapping free | |
5889 | stw r3,mpFlags(r31) ; Update flags | |
5890 | ||
5891 | graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping | |
5892 | rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page? | |
5893 | bne graLoop ; No, examine next slot | |
5894 | addi r28,r28,1 ; Increment hash table page index | |
5895 | cmplwi r28,GV_HPAGES ; End of hash table? | |
5896 | bne graPgLoop ; Examine next hash table page | |
5897 | ||
5898 | la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
5899 | bl sxlkUnlock ; Release host pmap's search lock | |
5900 | ||
5901 | bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately | |
5902 | mtmsr r30 ; Restore 'rupts, translation | |
5903 | isync ; Throw a small wrench into the pipeline | |
5904 | b graPopFrame ; Nothing to do now but pop a frame and return | |
5905 | graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode | |
5906 | graPopFrame: | |
5907 | lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
5908 | ; Get caller's return address | |
5909 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
5910 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
5911 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
5912 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
5913 | mtlr r0 ; Prepare return address | |
5914 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
5915 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
5916 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
5917 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
5918 | lwz r1,0(r1) ; Pop stack frame | |
5919 | blr ; Return to caller | |
5920 | ||
5921 | graBadPLock: | |
5922 | graRemoveMiss: | |
5923 | lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the | |
5924 | ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb? | |
5925 | li r3,failMapping ; The BOMB, Dmitri. | |
5926 | sc ; The hydrogen bomb. | |
5927 | ||
5928 | ||
5929 | ; | |
5930 | ; Guest shadow assist -- remove local guest mappings | |
5931 | ; | |
5932 | ; Remove local mappings for a guest pmap from the shadow hash table. | |
5933 | ; | |
5934 | ; Parameters: | |
5935 | ; r3 : address of guest pmap, 32-bit kernel virtual address | |
5936 | ; | |
5937 | ; Non-volatile register usage: | |
5938 | ; r20 : current active map word's physical address | |
5939 | ; r21 : current hash table page address | |
5940 | ; r22 : updated active map word in process | |
5941 | ; r23 : active map word in process | |
5942 | ; r24 : host pmap's physical address | |
5943 | ; r25 : VMM extension block's physical address | |
5944 | ; r26 : physent address | |
5945 | ; r27 : guest pmap's space ID number | |
5946 | ; r28 : current active map index | |
5947 | ; r29 : guest pmap's physical address | |
5948 | ; r30 : saved msr image | |
5949 | ; r31 : current mapping | |
5950 | ; | |
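The inner loop below pulls live mappings out of a 32-bit active-map word with cntlzw. In C, reusing the hypothetical declarations from the previous sketch and simplifying the banded slot addressing that the assembly derives from the word index:

    static uint32_t rem_local_word_model(uint32_t word, char *band,
                                         size_t slot_sz, uint32_t guest_space)
    {
        uint32_t live = word;
        while (live) {
            int bit = __builtin_clz(live);            /* next lit bit = next mapping */
            live &= ~(0x80000000u >> bit);
            struct gv_mapping_model *mp =
                (struct gv_mapping_model *)(void *)(band + (size_t)bit * slot_sz);
            if (mp->mpSpace != guest_space || (mp->mpFlags & MPG_GLOBAL_M))
                continue;                             /* skip global or foreign ones */
            word &= ~(0x80000000u >> bit);            /* clear its active-map bit */
            mp->mpFlags |= MPG_DORMANT_M;             /* dormant, not freed */
            gv_disconnect_pte(mp);                    /* invalidate its PTE if any */
        }
        return word;                                  /* written back at grlNextMap */
    }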
5951 | .align 5 | |
5952 | .globl EXT(hw_rem_local_gv) | |
5953 | ||
5954 | LEXT(hw_rem_local_gv) | |
5955 | ||
5956 | #define grlStackSize ((31-20+1)*4)+4 | |
5957 | stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1) | |
5958 | ; Mint a new stack frame | |
5959 | mflr r0 ; Get caller's return address | |
5960 | mfsprg r11,2 ; Get feature flags | |
5961 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
5962 | stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
5963 | ; Save caller's return address | |
5964 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
5965 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
5966 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
5967 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
5968 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
5969 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
5970 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
5971 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
5972 | stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23 | |
5973 | stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22 | |
5974 | stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21 | |
5975 | stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20 | |
5976 | ||
5977 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
5978 | ||
5979 | bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine | |
5980 | lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr | |
5981 | lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt | |
5982 | lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr | |
5983 | b grlStart ; Get to it | |
5984 | grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr | |
5985 | ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt | |
5986 | ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr | |
5987 | ||
5988 | grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode | |
5989 | xor r29,r3,r9 ; Convert pmap_t virt->real | |
5990 | mr r30,r11 ; Save caller's msr image | |
5991 | ||
5992 | la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
5993 | bl sxlkExclusive ; Get lock exclusive | |
5994 | ||
5995 | li r28,0 ; r28 <- index of first active map word to search | |
5996 | lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number | |
5997 | b grlMap1st ; Examine first map word | |
5998 | ||
5999 | .align 5 | |
6000 | grlNextMap: stw r22,0(r21) ; Save updated map word | |
6001 | addi r28,r28,1 ; Increment map word index | |
6002 | cmplwi r28,GV_MAP_WORDS ; See if we're done | |
6003 | beq grlDone ; Yup, let's get outta here | |
6004 | ||
6005 | grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array | |
6006 | rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK | |
6007 | ; Convert map index into map index offset | |
6008 | add r20,r20,r11 ; Calculate map array element address | |
6009 | lwz r22,0(r20) ; Get active map word at index | |
6010 | mr. r23,r22 ; Any active mappings indicated? | |
6011 | beq grlNextMap ; Nope, check next word | |
6012 | ||
6013 | la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index | |
6014 | rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK | |
6015 | ; Extract page index from map word index and convert | |
6016 | ; into page physical index offset | |
6017 | add r21,r21,r11 ; Calculate page physical index entry address | |
6018 | bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit | |
6019 | lwz r21,4(r21) ; Get selected hash table page's address | |
6020 | b grlLoop ; Examine all slots in this page | |
6021 | grl64Page: ld r21,0(r21) ; Get selected hash table page's address | |
6022 | b grlLoop ; Examine all slots in this page | |
6023 | ||
6024 | .align 5 | |
6025 | grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word | |
6026 | cmplwi r11,32 ; Any active mappings left in this word? | |
6027 | lis r12,0x8000 ; Prepare mask to reset bit | |
6028 | srw r12,r12,r11 ; Position mask bit | |
6029 | andc r23,r23,r12 ; Reset lit bit | |
6030 | beq grlNextMap ; No bits lit, examine next map word | |
6031 | ||
6032 | slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number | |
6033 | rlwinm r31,r28,GV_BAND_SHIFT,GV_BAND_MASK | |
6034 | ; Extract slot band number from index and insert | |
6035 | add r31,r31,r21 ; Add hash page address yielding mapping slot address | |
6036 | ||
6037 | lwz r3,mpFlags(r31) ; Get mapping's flags | |
6038 | lhz r4,mpSpace(r31) ; Get mapping's space ID number | |
6039 | rlwinm r5,r3,0,mpgGlobal ; Extract global bit | |
6040 | xor r4,r4,r27 ; Compare space ID number | |
6041 | or. r4,r4,r5 ; (space id miss || global) | |
6042 | bne grlLoop ; Not one of ours, skip it | |
6043 | andc r22,r22,r12 ; Reset active bit corresponding to this mapping | |
6044 | ori r3,r3,mpgDormant ; Mark entry dormant | |
6045 | stw r3,mpFlags(r31) ; Update mapping's flags | |
6046 | ||
6047 | bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately | |
6048 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6049 | ; r31 <- mapping's physical address | |
6050 | ; r3 -> PTE slot physical address | |
6051 | ; r4 -> High-order 32 bits of PTE | |
6052 | ; r5 -> Low-order 32 bits of PTE | |
6053 | ; r6 -> PCA | |
6054 | ; r7 -> PCA physical address | |
6055 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6056 | b grlFreePTE ; Join 64-bit path to release the PTE | |
6057 | grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
6058 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
6059 | grlFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
6060 | beq- grlLoop ; No valid PTE, we're done with this mapping | |
6061 | lis r0,0x8000 ; Prepare free bit for this slot | |
6062 | srw r0,r0,r2 ; Position free bit | |
6063 | or r6,r6,r0 ; Set it in our PCA image | |
6064 | lwz r8,mpPte(r31) ; Get PTE pointer | |
6065 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
6066 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
6067 | eieio ; Synchronize all previous updates (mapInvPtexx doesn't) | |
6068 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
6069 | b grlLoop ; On to next active mapping in this map word | |
6070 | ||
6071 | grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
6072 | bl sxlkUnlock ; Release host pmap's search lock | |
6073 | ||
6074 | bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately | |
6075 | mtmsr r30 ; Restore 'rupts, translation | |
6076 | isync ; Throw a small wrench into the pipeline | |
6077 | b grlPopFrame ; Nothing to do now but pop a frame and return | |
6078 | grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode | |
6079 | grlPopFrame: | |
6080 | lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6081 | ; Get caller's return address | |
6082 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
6083 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
6084 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
6085 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
6086 | mtlr r0 ; Prepare return address | |
6087 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
6088 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
6089 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
6090 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
6091 | lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23 | |
6092 | lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22 | |
6093 | lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21 | |
6094 | lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20 | |
6095 | lwz r1,0(r1) ; Pop stack frame | |
6096 | blr ; Return to caller | |
6097 | ||
6098 | ||
6099 | ; | |
6100 | ; Guest shadow assist -- resume a guest mapping | |
6101 | ; | |
6102 | ; Locates the specified dormant mapping, and if it exists validates it and makes it | |
6103 | ; active. | |
6104 | ; | |
6105 | ; Parameters: | |
6106 | ; r3 : address of host pmap, 32-bit kernel virtual address | |
6107 | ; r4 : address of guest pmap, 32-bit kernel virtual address | |
6108 | ; r5 : host virtual address, high-order 32 bits | |
6109 | ; r6 : host virtual address, low-order 32 bits | |
6110 | ; r7 : guest virtual address, high-order 32 bits | |
6111 | ; r8 : guest virtual address, low-order 32 bits | |
6112 | ; r9 : guest mapping protection code | |
6113 | ; | |
6114 | ; Non-volatile register usage: | |
6115 | ; r23 : VMM extension block's physical address | |
6116 | ; r24 : physent physical address | |
6117 | ; r25 : caller's msr image from mapSetUp | |
6118 | ; r26 : guest mapping protection code | |
6119 | ; r27 : host pmap physical address | |
6120 | ; r28 : guest pmap physical address | |
6121 | ; r29 : host virtual address | |
6122 | ; r30 : guest virtual address | |
6123 | ; r31 : gva->phys mapping's physical address | |
6124 | ; | |
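The shadow hash used below to find the mapping's hash group is simply spaceID ^ (gva >> 12), with one bit field selecting the hash page and another the group within it. A parameterized sketch; the actual field positions come from GV_HPAGE_MASK and GV_HGRP_MASK, which are not reproduced here:

    static char *shadow_hash_group_model(uint32_t space_id, uint64_t gva,
                                         char *const *page_index, /* hash page addrs */
                                         unsigned page_bits, unsigned group_bits,
                                         size_t group_sz)
    {
        uint32_t hash = (uint32_t)(gva >> 12) ^ space_id;   /* spaceID ^ (vaddr >> 12) */
        uint32_t page = hash & ((1u << page_bits) - 1);     /* select the hash page */
        uint32_t grp  = (hash >> page_bits) & ((1u << group_bits) - 1);
        return page_index[page] + (size_t)grp * group_sz;   /* the group's address */
    }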
6125 | .align 5 | |
6126 | .globl EXT(hw_res_map_gv) | |
6127 | ||
6128 | LEXT(hw_res_map_gv) | |
6129 | ||
6130 | #define grsStackSize ((31-23+1)*4)+4 | |
6131 | ||
6132 | stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1) | |
6133 | ; Mint a new stack frame | |
6134 | mflr r0 ; Get caller's return address | |
6135 | mfsprg r11,2 ; Get feature flags | |
6136 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
6137 | stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6138 | ; Save caller's return address | |
6139 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
6140 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
6141 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
6142 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
6143 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
6144 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
6145 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
6146 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
6147 | stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23 | |
6148 | ||
6149 | rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr | |
6150 | rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr | |
6151 | mr r26,r9 ; Copy guest mapping protection code | |
6152 | ||
6153 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
6154 | lwz r9,pmapSpace(r4) ; r9 <- guest space ID number | |
6155 | bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately | |
6156 | lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr | |
6157 | lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt | |
6158 | lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt | |
6159 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6160 | srwi r11,r30,12 ; Form shadow hash: | |
6161 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6162 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6163 | ; Form index offset from hash page number | |
6164 | add r31,r31,r10 ; r31 <- hash page index entry | |
6165 | lwz r31,4(r31) ; r31 <- hash page paddr | |
6166 | rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK | |
6167 | ; r31 <- hash group paddr | |
6168 | b grsStart ; Get to it | |
6169 | ||
6170 | grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr | |
6171 | rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr | |
6172 | ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr | |
6173 | ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt | |
6174 | ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt | |
6175 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6176 | srwi r11,r30,12 ; Form shadow hash: | |
6177 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6178 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6179 | ; Form index offset from hash page number | |
6180 | add r31,r31,r10 ; r31 <- hash page index entry | |
6181 | ld r31,0(r31) ; r31 <- hash page paddr | |
6182 | insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
6183 | ; r31 <- hash group paddr | |
6184 | ||
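;
; The shadow hash computed in both paths above, as a hedged C sketch
; (hpidx, hash_page_number, and group_offset are illustrative; the GV_*
; constants come from the kernel's guest-shadow headers):
;
;   uint32_t hash = space_id ^ (uint32_t)(gva >> 12);
;   paddr_t  page = hpidx[hash_page_number(hash)];  // via GV_HPAGE_SHIFT/MASK
;   paddr_t  grp  = page | group_offset(hash);      // via GV_HGRP_SHIFT/MASK
;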
6185 | grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real | |
6186 | xor r28,r4,r28 ; Convert guest pmap_t virt->real | |
6187 | bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode | |
6188 | mr r25,r11 ; Save caller's msr image | |
6189 | ||
6190 | la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address | |
6191 | bl sxlkExclusive ; Get lock exclusive | |
6192 | ||
6193 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6194 | mtctr r0 ; in this group | |
6195 | bt++ pf64Bitb,grs64Search ; Test for 64-bit machine | |
6196 | ||
6197 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6198 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6199 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
6200 | b grs32SrchLp ; Let the search begin! | |
6201 | ||
6202 | .align 5 | |
6203 | grs32SrchLp: | |
6204 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6205 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6206 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6207 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6208 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6209 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
6210 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6211 | xor r7,r7,r9 ; Compare space ID | |
6212 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6213 | xor r8,r8,r30 ; Compare virtual address | |
6214 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6215 | beq grsSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6216 | ||
6217 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6218 | bdnz grs32SrchLp ; Iterate | |
6219 | ||
6220 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6221 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
6222 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6223 | xor r4,r4,r9 ; Compare space ID | |
6224 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6225 | xor r5,r5,r30 ; Compare virtual address | |
6226 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6227 | beq grsSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6228 | b grsSrchMiss ; No joy in our hash group | |
6229 | ||
6230 | grs64Search: | |
6231 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6232 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6233 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
6234 | b grs64SrchLp ; Let the search begin! | |
6235 | ||
6236 | .align 5 | |
6237 | grs64SrchLp: | |
6238 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6239 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6240 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6241 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6242 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6243 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
6244 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6245 | xor r7,r7,r9 ; Compare space ID | |
6246 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6247 | xor r8,r8,r30 ; Compare virtual address | |
6248 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6249 | beq grsSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6250 | ||
6251 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6252 | bdnz grs64SrchLp ; Iterate | |
6253 | ||
6254 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6255 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
6256 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6257 | xor r4,r4,r9 ; Compare space ID | |
6258 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6259 | xor r5,r5,r30 ; Compare virtual address | |
6260 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6261 | bne grsSrchMiss ; No joy in our hash group | |
6262 | ||
6263 | grsSrchHit: | |
6264 | rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant? | |
6265 | bne grsFindHost ; Yes, nothing to disconnect | |
6266 | ||
6267 | bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately | |
6268 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6269 | ; r31 <- mapping's physical address | |
6270 | ; r3 -> PTE slot physical address | |
6271 | ; r4 -> High-order 32 bits of PTE | |
6272 | ; r5 -> Low-order 32 bits of PTE | |
6273 | ; r6 -> PCA | |
6274 | ; r7 -> PCA physical address | |
6275 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6276 | b grsFreePTE ; Join 64-bit path to release the PTE | |
6277 | grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
6278 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
6279 | grsFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
6280 | beq- grsFindHost ; No valid PTE, we're almost done | |
6281 | lis r0,0x8000 ; Prepare free bit for this slot | |
6282 | srw r0,r0,r2 ; Position free bit | |
6283 | or r6,r6,r0 ; Set it in our PCA image | |
6284 | lwz r8,mpPte(r31) ; Get PTE pointer | |
6285 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
6286 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
6287 | eieio ; Synchronize all previous updates (mapInvPtexx didn't) | |
6288 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
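;
; Note: the free bit set above is simply (0x80000000 >> slot), ORed into the
; PCA image; storing the updated PCA word both records the freed PTE slot
; and unlocks the PTEG.
;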
6289 | ||
6290 | grsFindHost: | |
6291 | ||
6292 | // We now have a dormant guest mapping that matches our space id and virtual address. Our next | |
6293 | // step is to locate the host mapping that completes the guest mapping's connection to a physical | |
6294 | // frame. The guest and host mappings must connect to the same physical frame, so they must both | |
6295 | // be chained on the same physent. We search the physent chain for a host mapping matching our | |
6296 | // host's space id and the host virtual address. If we succeed, we know that the entire chain | |
6297 | // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be | |
6298 | // resumed. If we fail to find the specified host virtual->physical mapping, it is because the | |
6299 | // host virtual or physical address has changed since the guest mapping was suspended, so it | |
6300 | // is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell | |
6301 | // our caller that it will have to take its long path, translating the host virtual address | |
6302 | // through the host's skiplist and installing a new guest mapping. | |
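//
// A hedged C-style sketch of that physent chain walk (the struct and helper
// names are illustrative; the fields mirror the mpFlags/mpSpace/mpVAddr/
// mpAlias slots used below):
//
//   for (mp = physent_first(pp); mp != NULL; mp = mp->alias_next) {
//       if (mp_type(mp) == mpNormal &&
//           mp->space == host_space_id &&
//           (mp->vaddr & ~mpHWFlags) == hva)
//           return mp;               // gva->hva->phys chain is intact
//   }
//   return NULL;                     // host side moved; delete the guest slot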
6303 | ||
6304 | lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number | |
6305 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
6306 | mr. r24,r3 ; Got lock on our physent? | |
6307 | beq-- grsBadPLock ; No, time to bail out | |
6308 | ||
6309 | bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search | |
6310 | ||
6311 | lwz r9,ppLink+4(r24) ; Get first mapping on physent | |
6312 | lwz r6,pmapSpace(r27) ; Get host pmap's space id number | |
6313 | rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags | |
6314 | grsPELoop: mr. r12,r9 ; Got a mapping to look at? | |
6315 | beq- grsPEMiss ; Nope, we've missed hva->phys mapping | |
6316 | lwz r7,mpFlags(r12) ; Get mapping's flags | |
6317 | lhz r4,mpSpace(r12) ; Get mapping's space id number | |
6318 | lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address | |
6319 | lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain | |
6320 | ||
6321 | rlwinm r0,r7,0,mpType ; Isolate mapping's type | |
6322 | rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags | |
6323 | xori r0,r0,mpNormal ; Normal mapping? | |
6324 | xor r4,r4,r6 ; Compare w/ host space id number | |
6325 | xor r5,r5,r29 ; Compare w/ host virtual address | |
6326 | or r0,r0,r4 ; r0 <- (wrong type || !space id) | |
6327 | or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit) | |
6328 | beq grsPEHit ; Hit | |
6329 | b grsPELoop ; Iterate | |
6330 | ||
6331 | grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer | |
6332 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6333 | ld r9,ppLink(r24) ; Get first mapping on physent | |
6334 | lwz r6,pmapSpace(r27) ; Get pmap's space id number | |
6335 | andc r9,r9,r0 ; Cleanup mapping pointer | |
6336 | grsPELp64: mr. r12,r9 ; Got a mapping to look at? | |
6337 | beq-- grsPEMiss ; Nope, we've missed hva->phys mapping | |
6338 | lwz r7,mpFlags(r12) ; Get mapping's flags | |
6339 | lhz r4,mpSpace(r12) ; Get mapping's space id number | |
6340 | ld r5,mpVAddr(r12) ; Get mapping's virtual address | |
6341 | ld r9,mpAlias(r12) ; Next mapping in physent alias chain | |
6342 | rlwinm r0,r7,0,mpType ; Isolate mapping's type | |
6343 | rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags | |
6344 | xori r0,r0,mpNormal ; Normal mapping? | |
6345 | xor r4,r4,r6 ; Compare w/ host space id number | |
6346 | xor r5,r5,r29 ; Compare w/ host virtual address | |
6347 | or r0,r0,r4 ; r0 <- (wrong type || !space id) | |
6348 | or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit) | |
6349 | beq grsPEHit ; Hit | |
6350 | b grsPELp64 ; Iterate | |
6351 | ||
6352 | grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits | |
6353 | rlwimi r0,r26,0,mpPP ; Insert new protection bits | |
6354 | stw r0,mpVAddr+4(r31) ; Write 'em back | |
6355 | ||
6356 | eieio ; Ensure previous mapping updates are visible | |
6357 | lwz r0,mpFlags(r31) ; Get flags | |
6358 | rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag | |
6359 | stw r0,mpFlags(r31) ; Set updated flags, entry is now valid | |
6360 | ||
6361 | li r31,mapRtOK ; Indicate success | |
6362 | b grsRelPhy ; Exit through physent lock release | |
6363 | ||
6364 | grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor | |
6365 | bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine | |
6366 | la r11,ppLink+4(r24) ; Point to chain anchor | |
6367 | lwz r9,ppLink+4(r24) ; Get chain anchor | |
6368 | rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer | |
6369 | grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good | |
6370 | cmplw r9,r31 ; Is this the mapping to remove? | |
6371 | lwz r8,mpAlias+4(r9) ; Get forward chain pointer | |
6372 | bne grsRemNext ; No, chain onward | |
6373 | bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor | |
6374 | stw r8,0(r11) ; Unchain gpv->phys mapping | |
6375 | b grsDelete ; Finish deleting mapping | |
6376 | grsRemRetry: | |
6377 | lwarx r0,0,r11 ; Get previous link | |
6378 | rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags | |
6379 | stwcx. r0,0,r11 ; Update previous link | |
6380 | bne- grsRemRetry ; Lost reservation, retry | |
6381 | b grsDelete ; Finish deleting mapping | |
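;
; The lwarx/stwcx. retry above is PowerPC load-reserved/store-conditional;
; a hedged C11 equivalent (the anchor type and flag handling are
; illustrative):
;
;   uint32_t old, new;
;   do {
;       old = atomic_load(anchor);                   // lwarx
;       new = (old & ppFlags) | forward_ptr;         // keep flags, splice ptr
;   } while (!atomic_compare_exchange_weak(anchor, &old, new));  // stwcx./bne-
;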
6382 | ||
6383 | .align 5 | |
6384 | grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link | |
6385 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6386 | mr. r9,r8 ; Does next entry exist? | |
6387 | b grsRemLoop ; Carry on | |
6388 | ||
6389 | grsRemove64: | |
6390 | li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
6391 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6392 | la r11,ppLink(r24) ; Point to chain anchor | |
6393 | ld r9,ppLink(r24) ; Get chain anchor | |
6394 | andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer | |
6395 | grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good | |
6396 | cmpld r9,r31 ; Is this the mapping to remove? | |
6397 | ld r8,mpAlias(r9) ; Get forward chain pointer | |
6398 | bne grsRem64Nxt ; Not mapping to remove, chain on, dude | |
6399 | bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor | |
6400 | std r8,0(r11) ; Unchain gpv->phys mapping | |
6401 | b grsDelete ; Finish deleting mapping | |
6402 | grsRem64Rt: ldarx r0,0,r11 ; Get previous link | |
6403 | and r0,r0,r7 ; Get flags | |
6404 | or r0,r0,r8 ; Insert new forward pointer | |
6405 | stdcx. r0,0,r11 ; Slam it back in | |
6406 | bne-- grsRem64Rt ; Lost reservation, retry | |
6407 | b grsDelete ; Finish deleting mapping | |
6408 | ||
6409 | .align 5 | |
6410 | grsRem64Nxt: | |
6411 | la r11,mpAlias(r9) ; Point to (soon to be) previous link | |
6412 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6413 | mr. r9,r8 ; Does next entry exist? | |
6414 | b grsRem64Lp ; Carry on | |
6415 | ||
6416 | grsDelete: | |
6417 | lwz r3,mpFlags(r31) ; Get mapping's flags | |
6418 | rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags | |
6419 | ori r3,r3,mpgFree ; Mark mapping free | |
6420 | stw r3,mpFlags(r31) ; Update flags | |
6421 | ||
6422 | li r31,mapRtNotFnd ; Didn't succeed | |
6423 | ||
6424 | grsRelPhy: mr r3,r24 ; r3 <- physent addr | |
6425 | bl mapPhysUnlock ; Unlock physent chain | |
6426 | ||
6427 | grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr | |
6428 | bl sxlkUnlock ; Release host pmap search lock | |
6429 | ||
6430 | grsRtn: mr r3,r31 ; r3 <- result code | |
6431 | bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately | |
6432 | mtmsr r25 ; Restore 'rupts, translation | |
6433 | isync ; Throw a small wrench into the pipeline | |
6434 | b grsPopFrame ; Nothing to do now but pop a frame and return | |
6435 | grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode | |
6436 | grsPopFrame: | |
6437 | lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6438 | ; Get caller's return address | |
6439 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
6440 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
6441 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
6442 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
6443 | mtlr r0 ; Prepare return address | |
6444 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
6445 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
6446 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
6447 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
6448 | lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23 | |
6449 | lwz r1,0(r1) ; Pop stack frame | |
6450 | blr ; Return to caller | |
6451 | ||
6452 | .align 5 | |
6453 | grsSrchMiss: | |
6454 | li r31,mapRtNotFnd ; Could not locate requested mapping | |
6455 | b grsRelPmap ; Exit through host pmap search lock release | |
6456 | ||
6457 | grsBadPLock: | |
6458 | grsPEMissMiss: | |
6459 | lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the | |
6460 | ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb? | |
6461 | li r3,failMapping ; The BOMB, Dmitri. | |
6462 | sc ; The hydrogen bomb. | |
6463 | ||
6464 | ||
6465 | ; | |
6466 | ; Guest shadow assist -- add a guest mapping | |
6467 | ; | |
6468 | ; Adds a guest mapping. | |
6469 | ; | |
6470 | ; Parameters: | |
6471 | ; r3 : address of host pmap, 32-bit kernel virtual address | |
6472 | ; r4 : address of guest pmap, 32-bit kernel virtual address | |
6473 | ; r5 : guest virtual address, high-order 32 bits | |
6474 | ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags) | |
6475 | ; r7 : new mapping's flags | |
6476 | ; r8 : physical address, 32-bit page number | |
6477 | ; | |
6478 | ; Non-volatile register usage: | |
6479 | ; r22 : hash group's physical address | |
6480 | ; r23 : VMM extension block's physical address | |
6481 | ; r24 : mapping's flags | |
6482 | ; r25 : caller's msr image from mapSetUp | |
6483 | ; r26 : physent physical address | |
6484 | ; r27 : host pmap physical address | |
6485 | ; r28 : guest pmap physical address | |
6486 | ; r29 : physical address, 32-bit 4k-page number | |
6487 | ; r30 : guest virtual address | |
6488 | ; r31 : gva->phys mapping's physical address | |
6489 | ; | |
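;
; In outline, a hedged C-style sketch of the flow below (helper names are
; illustrative, not functions in this file):
;
;   lock(host_pmap->search_lock);
;   if (group_search(guest_pmap, gva) != NULL)     // already mapped: bail and
;       goto out;                                  // let the caller redrive
;   slot = free_slot    ? free_slot                // prefer a free slot,
;        : dormant_slot ? dormant_slot             // else first dormant seen,
;        : steal_slot_at_cursor();                 // else steal (kill its PTE)
;   if (slot was previously in use)
;       unchain_from_old_physent(slot);
;   fill(slot, flags, space_id, gva, pa);          // mpPte left invalid
;   chain_to_physent_head(slot, pa);
;   out: unlock(host_pmap->search_lock);
;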
6490 | ||
6491 | .align 5 | |
6492 | .globl EXT(hw_add_map_gv) | |
6493 | ||
6494 | ||
6495 | LEXT(hw_add_map_gv) | |
6496 | ||
6497 | #define gadStackSize ((31-22+1)*4)+4 | |
6498 | ||
6499 | stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1) | |
6500 | ; Mint a new stack frame | |
6501 | mflr r0 ; Get caller's return address | |
6502 | mfsprg r11,2 ; Get feature flags | |
6503 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
6504 | stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6505 | ; Save caller's return address | |
6506 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
6507 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
6508 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
6509 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
6510 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
6511 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
6512 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
6513 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
6514 | stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23 | |
6515 | stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22 | |
6516 | ||
6517 | rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr | |
6518 | rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr | |
6519 | mr r24,r7 ; Copy guest mapping's flags | |
6520 | mr r29,r8 ; Copy target frame's physical address | |
6521 | ||
6522 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
6523 | lwz r9,pmapSpace(r4) ; r9 <- guest space ID number | |
6524 | bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine | |
6525 | lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr | |
6526 | lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt | |
6527 | lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt | |
6528 | la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index | |
6529 | srwi r11,r30,12 ; Form shadow hash: | |
6530 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6531 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6532 | ; Form index offset from hash page number | |
6533 | add r22,r22,r10 ; r22 <- hash page index entry | |
6534 | lwz r22,4(r22) ; r22 <- hash page paddr | |
6535 | rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK | |
6536 | ; r22 <- hash group paddr | |
6537 | b gadStart ; Get to it | |
6538 | ||
6539 | gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr | |
6540 | ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt | |
6541 | ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt | |
6542 | la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index | |
6543 | srwi r11,r30,12 ; Form shadow hash: | |
6544 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6545 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6546 | ; Form index offset from hash page number | |
6547 | add r22,r22,r10 ; r22 <- hash page index entry | |
6548 | ld r22,0(r22) ; r22 <- hash page paddr | |
6549 | insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
6550 | ; r22 <- hash group paddr | |
6551 | ||
6552 | gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real | |
6553 | xor r28,r4,r28 ; Convert guest pmap_t virt->real | |
6554 | bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode | |
6555 | mr r25,r11 ; Save caller's msr image | |
6556 | ||
6557 | la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address | |
6558 | bl sxlkExclusive ; Get lock exclusive | |
6559 | ||
6560 | mr r31,r22 ; Prepare to search this group | |
6561 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6562 | mtctr r0 ; in this group | |
6563 | bt++ pf64Bitb,gad64Search ; Test for 64-bit machine | |
6564 | ||
6565 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6566 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6567 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
6568 | clrrwi r12,r30,12 ; r12 <- virtual address we're searching for | |
6569 | b gad32SrchLp ; Let the search begin! | |
6570 | ||
6571 | .align 5 | |
6572 | gad32SrchLp: | |
6573 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6574 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6575 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6576 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6577 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6578 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
6579 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6580 | xor r7,r7,r9 ; Compare space ID | |
6581 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6582 | xor r8,r8,r12 ; Compare virtual address | |
6583 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6584 | beq gadRelPmap ; Hit, let upper-level redrive sort it out | |
6585 | ||
6586 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6587 | bdnz gad32SrchLp ; Iterate | |
6588 | ||
6589 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6590 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
6591 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6592 | xor r4,r4,r9 ; Compare space ID | |
6593 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6594 | xor r5,r5,r12 ; Compare virtual address | |
6595 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6596 | beq gadRelPmap ; Hit, let upper-level redrive sort it out | |
6597 | b gadScan ; No joy in our hash group | |
6598 | ||
6599 | gad64Search: | |
6600 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6601 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6602 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
6603 | clrrdi r12,r30,12 ; r12 <- virtual address we're searching for | |
6604 | b gad64SrchLp ; Let the search begin! | |
6605 | ||
6606 | .align 5 | |
6607 | gad64SrchLp: | |
6608 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6609 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6610 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6611 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6612 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6613 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
6614 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6615 | xor r7,r7,r9 ; Compare space ID | |
6616 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6617 | xor r8,r8,r12 ; Compare virtual address | |
6618 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6619 | beq gadRelPmap ; Hit, let upper-level redrive sort it out | |
6620 | ||
6621 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6622 | bdnz gad64SrchLp ; Iterate | |
6623 | ||
6624 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6625 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
6626 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6627 | xor r4,r4,r9 ; Compare space ID | |
6628 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6629 | xor r5,r5,r12 ; Compare virtual address | |
6630 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6631 | bne gadScan ; No joy in our hash group | |
6632 | b gadRelPmap ; Hit, let upper-level redrive sort it out | |
6633 | ||
6634 | gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor | |
6635 | rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2) | |
6636 | ; Prepare to address slot at cursor | |
6637 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6638 | mtctr r0 ; in this group | |
6639 | or r2,r22,r12 ; r2 <- 1st mapping to search | |
6640 | lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags | |
6641 | li r11,0 ; No dormant entries found yet | |
6642 | b gadScanLoop ; Let the search begin! | |
6643 | ||
6644 | .align 5 | |
6645 | gadScanLoop: | |
6646 | addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search | |
6647 | rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2) | |
6648 | ; Trim off any carry, wrapping into slot number range | |
6649 | mr r31,r2 ; r31 <- current mapping's address | |
6650 | or r2,r22,r12 ; r2 <- next mapping to search | |
6651 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6652 | lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags | |
6653 | rlwinm. r0,r6,0,mpgFree ; Test free flag | |
6654 | bne gadFillMap ; Join common path on hit (r31 points to free mapping) | |
6655 | rlwinm r0,r6,0,mpgDormant ; Dormant entry? | |
6656 | xori r0,r0,mpgDormant ; Invert dormant flag | |
6657 | or. r0,r0,r11 ; Skip all but the first dormant entry we see | |
6658 | bne gadNotDorm ; Not dormant or we've already seen one | |
6659 | mr r11,r31 ; We'll use this dormant entry if we don't find a free one first | |
6660 | gadNotDorm: bdnz gadScanLoop ; Iterate | |
6661 | ||
6662 | mr r31,r2 ; r31 <- final mapping's address | |
6663 | rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping | |
6664 | bne gadFillMap ; Join common path on hit (r31 points to free mapping) | |
6665 | rlwinm r0,r6,0,mpgDormant ; Dormant entry? | |
6666 | xori r0,r0,mpgDormant ; Invert dormant flag | |
6667 | or. r0,r0,r11 ; Skip all but the first dormant entry we see | |
6668 | bne gadCkDormant ; Not dormant or we've already seen one | |
6669 | mr r11,r31 ; We'll use this dormant entry if we don't find a free one first | |
6670 | ||
6671 | gadCkDormant: | |
6672 | mr. r31,r11 ; Get dormant mapping, if any, and test | |
6673 | bne gadUpCursor ; Go update the cursor, we'll take the dormant entry | |
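;
; The scan above implements this slot-selection policy, sketched in C-style
; pseudocode (names illustrative):
;
;   victim = NULL;
;   for (i = 0; i < GV_SLOTS; i++) {                 // scan starts at cursor
;       slot = &group[(cursor + i) % GV_SLOTS];
;       if (slot->free) use(slot);                   // best: a free slot
;       if (slot->dormant && victim == NULL)
;           victim = slot;                           // else first dormant seen
;   }
;   if (victim == NULL) victim = &group[cursor];     // else steal at cursor
;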
6674 | ||
6675 | gadSteal: | |
6676 | lbz r12,mpgCursor(r22) ; Get group's cursor | |
6677 | rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2) | |
6678 | ; Prepare to address slot at cursor | |
6679 | or r31,r22,r12 ; r31 <- address of mapping to steal | |
6680 | ||
6681 | bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately | |
6682 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6683 | ; r31 <- mapping's physical address | |
6684 | ; r3 -> PTE slot physical address | |
6685 | ; r4 -> High-order 32 bits of PTE | |
6686 | ; r5 -> Low-order 32 bits of PTE | |
6687 | ; r6 -> PCA | |
6688 | ; r7 -> PCA physical address | |
6689 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6690 | b gadFreePTE ; Join 64-bit path to release the PTE | |
6691 | gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
6692 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
6693 | gadFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
6694 | beq- gadUpCursor ; No valid PTE, we're almost done | |
6695 | lis r0,0x8000 ; Prepare free bit for this slot | |
6696 | srw r0,r0,r2 ; Position free bit | |
6697 | or r6,r6,r0 ; Set it in our PCA image | |
6698 | lwz r8,mpPte(r31) ; Get PTE pointer | |
6699 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
6700 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
6701 | eieio ; Synchronize all previous updates (mapInvPtexx didn't) | |
6702 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
6703 | ||
6704 | gadUpCursor: | |
6705 | rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK | |
6706 | ; Recover slot number from stolen mapping's address | |
6707 | addi r12,r12,1 ; Increment slot number | |
6708 | rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range | |
6709 | stb r12,mpgCursor(r22) ; Update group's cursor | |
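;
; i.e. cursor = (victim_slot + 1) % GV_SLOTS, so successive steals rotate
; round-robin through the group.
;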
6710 | ||
6711 | lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number | |
6712 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
6713 | mr. r26,r3 ; Got lock on our physent? | |
6714 | beq-- gadBadPLock ; No, time to bail out | |
6715 | ||
6716 | crset cr1_eq ; cr1_eq <- previous link is the anchor | |
6717 | bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine | |
6718 | la r11,ppLink+4(r26) ; Point to chain anchor | |
6719 | lwz r9,ppLink+4(r26) ; Get chain anchor | |
6720 | rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer | |
6721 | gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good | |
6722 | cmplw r9,r31 ; Is this the mapping to remove? | |
6723 | lwz r8,mpAlias+4(r9) ; Get forward chain pointer | |
6724 | bne gadRemNext ; No, chain onward | |
6725 | bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor | |
6726 | stw r8,0(r11) ; Unchain gpv->phys mapping | |
6727 | b gadDelDone ; Finish deleting mapping | |
6728 | gadRemRetry: | |
6729 | lwarx r0,0,r11 ; Get previous link | |
6730 | rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags | |
6731 | stwcx. r0,0,r11 ; Update previous link | |
6732 | bne- gadRemRetry ; Lost reservation, retry | |
6733 | b gadDelDone ; Finish deleting mapping | |
6734 | ||
6735 | gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link | |
6736 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6737 | mr. r9,r8 ; Does next entry exist? | |
6738 | b gadRemLoop ; Carry on | |
6739 | ||
6740 | gadRemove64: | |
6741 | li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
6742 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6743 | la r11,ppLink(r26) ; Point to chain anchor | |
6744 | ld r9,ppLink(r26) ; Get chain anchor | |
6745 | andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer | |
6746 | gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good | |
6747 | cmpld r9,r31 ; Is this the mapping to remove? | |
6748 | ld r8,mpAlias(r9) ; Get forward chain pointer | |
6749 | bne gadRem64Nxt ; Not mapping to remove, chain on, dude | |
6750 | bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor | |
6751 | std r8,0(r11) ; Unchain gpv->phys mapping | |
6752 | b gadDelDone ; Finish deleting mapping | |
6753 | gadRem64Rt: ldarx r0,0,r11 ; Get previous link | |
6754 | and r0,r0,r7 ; Get flags | |
6755 | or r0,r0,r8 ; Insert new forward pointer | |
6756 | stdcx. r0,0,r11 ; Slam it back in | |
6757 | bne-- gadRem64Rt ; Lost reservation, retry | |
6758 | b gadDelDone ; Finish deleting mapping | |
6759 | ||
6760 | .align 5 | |
6761 | gadRem64Nxt: | |
6762 | la r11,mpAlias(r9) ; Point to (soon to be) previous link | |
6763 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6764 | mr. r9,r8 ; Does next entry exist? | |
6765 | b gadRem64Lp ; Carry on | |
6766 | ||
6767 | gadDelDone: | |
6768 | mr r3,r26 ; Get physent address | |
6769 | bl mapPhysUnlock ; Unlock physent chain | |
6770 | ||
6771 | gadFillMap: | |
6772 | lwz r12,pmapSpace(r28) ; Get guest space id number | |
6773 | li r2,0 ; Get a zero | |
6774 | stw r24,mpFlags(r31) ; Set mapping's flags | |
6775 | sth r12,mpSpace(r31) ; Set mapping's space id number | |
6776 | stw r2,mpPte(r31) ; Set mapping's pte pointer invalid | |
6777 | stw r29,mpPAddr(r31) ; Set mapping's physical address | |
6778 | bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine | |
6779 | stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags) | |
6780 | b gadChain ; Continue with chaining mapping to physent | |
6781 | gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags) | |
6782 | ||
6783 | gadChain: mr r3,r29 ; r3 <- physical frame address | |
6784 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
6785 | mr. r26,r3 ; Got lock on our physent? | |
6786 | beq-- gadBadPLock ; No, time to bail out | |
6787 | ||
6788 | bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine | |
6789 | lwz r12,ppLink+4(r26) ; Get forward chain | |
6790 | rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags | |
6791 | rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags | |
6792 | stw r11,mpAlias+4(r31) ; New mapping will head chain | |
6793 | stw r12,ppLink+4(r26) ; Point physent to new mapping | |
6794 | b gadFinish ; All over now... | |
6795 | ||
6796 | gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
6797 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6798 | ld r12,ppLink(r26) ; Get forward chain | |
6799 | andc r11,r12,r7 ; Get physent's forward chain pointer sans flags | |
6800 | and r12,r12,r7 ; Isolate pointer's flags | |
6801 | or r12,r12,r31 ; Insert new mapping's address forming pointer | |
6802 | std r11,mpAlias(r31) ; New mapping will head chain | |
6803 | std r12,ppLink(r26) ; Point physent to new mapping | |
6804 | ||
6805 | gadFinish: eieio ; Ensure new mapping is completely visible | |
6806 | ||
6807 | gadRelPhy: mr r3,r26 ; r3 <- physent addr | |
6808 | bl mapPhysUnlock ; Unlock physent chain | |
6809 | ||
6810 | gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr | |
6811 | bl sxlkUnlock ; Release host pmap search lock | |
6812 | ||
6813 | bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately | |
6814 | mtmsr r25 ; Restore 'rupts, translation | |
6815 | isync ; Throw a small wrench into the pipeline | |
6816 | b gadPopFrame ; Nothing to do now but pop a frame and return | |
6817 | gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode | |
6818 | gadPopFrame: | |
6819 | lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6820 | ; Get caller's return address | |
6821 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
6822 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
6823 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
6824 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
6825 | mtlr r0 ; Prepare return address | |
6826 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
6827 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
6828 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
6829 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
6830 | lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23 | |
6831 | lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22 | |
6832 | lwz r1,0(r1) ; Pop stack frame | |
6833 | blr ; Return to caller | |
6834 | ||
6835 | gadPEMissMiss: | |
6836 | gadBadPLock: | |
6837 | lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the | |
6838 | ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb? | |
6839 | li r3,failMapping ; The BOMB, Dmitri. | |
6840 | sc ; The hydrogen bomb. | |
6841 | ||
6842 | ||
6843 | ; | |
6844 | ; Guest shadow assist -- suspend a guest mapping | |
6845 | ; | |
6846 | ; Suspends a guest mapping. | |
6847 | ; | |
6848 | ; Parameters: | |
6849 | ; r3 : address of host pmap, 32-bit kernel virtual address | |
6850 | ; r4 : address of guest pmap, 32-bit kernel virtual address | |
6851 | ; r5 : guest virtual address, high-order 32 bits | |
6852 | ; r6 : guest virtual address, low-order 32 bits | |
6853 | ; | |
6854 | ; Non-volatile register usage: | |
6855 | ; r26 : VMM extension block's physical address | |
6856 | ; r27 : host pmap physical address | |
6857 | ; r28 : guest pmap physical address | |
6858 | ; r29 : caller's msr image from mapSetUp | |
6859 | ; r30 : guest virtual address | |
6860 | ; r31 : gva->phys mapping's physical address | |
6861 | ; | |
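;
; In outline, a hedged C-style sketch of the flow below (helper names are
; illustrative):
;
;   lock(host_pmap->search_lock);
;   slot = group_search_active(guest_pmap, gva);  // skips free/dormant slots
;   if (slot != NULL) {
;       disconnect_pte(slot);                     // invalidate any HW PTE
;       slot->flags |= mpgDormant;                // stays chained to physent
;   }
;   unlock(host_pmap->search_lock);
;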
6862 | ||
6863 | .align 5 | |
6864 | .globl EXT(hw_susp_map_gv) | |
6865 | ||
6866 | LEXT(hw_susp_map_gv) | |
6867 | ||
6868 | #define gsuStackSize ((31-26+1)*4)+4 | |
6869 | ||
6870 | stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1) | |
6871 | ; Mint a new stack frame | |
6872 | mflr r0 ; Get caller's return address | |
6873 | mfsprg r11,2 ; Get feature flags | |
6874 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
6875 | stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6876 | ; Save caller's return address | |
6877 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
6878 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
6879 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
6880 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
6881 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
6882 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
6883 | ||
6884 | rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr | |
6885 | ||
6886 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
6887 | lwz r9,pmapSpace(r4) ; r9 <- guest space ID number | |
6888 | bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine | |
6889 | ||
6890 | lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr | |
6891 | lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt | |
6892 | lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt | |
6893 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6894 | srwi r11,r30,12 ; Form shadow hash: | |
6895 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6896 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6897 | ; Form index offset from hash page number | |
6898 | add r31,r31,r10 ; r31 <- hash page index entry | |
6899 | lwz r31,4(r31) ; r31 <- hash page paddr | |
6900 | rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK | |
6901 | ; r31 <- hash group paddr | |
6902 | b gsuStart ; Get to it | |
6903 | gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr | |
6904 | ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr | |
6905 | ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt | |
6906 | ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt | |
6907 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6908 | srwi r11,r30,12 ; Form shadow hash: | |
6909 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6910 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6911 | ; Form index offset from hash page number | |
6912 | add r31,r31,r10 ; r31 <- hash page index entry | |
6913 | ld r31,0(r31) ; r31 <- hash page paddr | |
6914 | insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
6915 | ; r31 <- hash group paddr | |
6916 | ||
6917 | gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real | |
6918 | xor r28,r4,r28 ; Convert guest pmap_t virt->real | |
6919 | bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode | |
6920 | mr r29,r11 ; Save caller's msr image | |
6921 | ||
6922 | la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address | |
6923 | bl sxlkExclusive ; Get lock exclusive | |
6924 | ||
6925 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6926 | mtctr r0 ; in this group | |
6927 | bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine | |
6928 | ||
6929 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6930 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6931 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
6932 | b gsu32SrchLp ; Let the search begin! | |
6933 | ||
6934 | .align 5 | |
6935 | gsu32SrchLp: | |
6936 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6937 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6938 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6939 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6940 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6941 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
6942 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6943 | xor r7,r7,r9 ; Compare space ID | |
6944 | or r0,r11,r7 ; r0 <- !(!free && !dormant && space match) | |
6945 | xor r8,r8,r30 ; Compare virtual address | |
6946 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6947 | beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6948 | ||
6949 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6950 | bdnz gsu32SrchLp ; Iterate | |
6951 | ||
6952 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6953 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
6954 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6955 | xor r4,r4,r9 ; Compare space ID | |
6956 | or r0,r11,r4 ; r0 <- !(!free && !dormant && space match) | |
6957 | xor r5,r5,r30 ; Compare virtual address | |
6958 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6959 | beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6960 | b gsuSrchMiss ; No joy in our hash group | |
6961 | ||
6962 | gsu64Search: | |
6963 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6964 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6965 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
6966 | b gsu64SrchLp ; Let the search begin! | |
6967 | ||
6968 | .align 5 | |
6969 | gsu64SrchLp: | |
6970 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6971 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6972 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6973 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6974 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6975 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
6976 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6977 | xor r7,r7,r9 ; Compare space ID | |
6978 | or r0,r11,r7 ; r0 <- !(!free && !dormant && space match) | |
6979 | xor r8,r8,r30 ; Compare virtual address | |
6980 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6981 | beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6982 | ||
6983 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6984 | bdnz gsu64SrchLp ; Iterate | |
6985 | ||
6986 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6987 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
6988 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6989 | xor r4,r4,r9 ; Compare space ID | |
6990 | or r0,r11,r4 ; r0 <- !(!free && !dormant && space match) | |
6991 | xor r5,r5,r30 ; Compare virtual address | |
6992 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6993 | bne gsuSrchMiss ; No joy in our hash group | |
6994 | ||
6995 | gsuSrchHit: | |
6996 | bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately | |
6997 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6998 | ; r31 <- mapping's physical address | |
6999 | ; r3 -> PTE slot physical address | |
7000 | ; r4 -> High-order 32 bits of PTE | |
7001 | ; r5 -> Low-order 32 bits of PTE | |
7002 | ; r6 -> PCA | |
7003 | ; r7 -> PCA physical address | |
7004 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
7005 | b gsuFreePTE ; Join 64-bit path to release the PTE | |
7006 | gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
7007 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
7008 | gsuFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
7009 | beq- gsuNoPTE ; No valid PTE, we're almost done | |
7010 | lis r0,0x8000 ; Prepare free bit for this slot | |
7011 | srw r0,r0,r2 ; Position free bit | |
7012 | or r6,r6,r0 ; Set it in our PCA image | |
7013 | lwz r8,mpPte(r31) ; Get PTE pointer | |
7014 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
7015 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
7016 | eieio ; Synchronize all previous updates (mapInvPtexx didn't) | |
7017 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
7018 | ||
7019 | gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags | |
7020 | ori r3,r3,mpgDormant ; Mark entry dormant | |
7021 | stw r3,mpFlags(r31) ; Save updated flags | |
7022 | eieio ; Ensure update is visible when we unlock | |
7023 | ||
7024 | gsuSrchMiss: | |
7025 | la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr | |
7026 | bl sxlkUnlock ; Release host pmap search lock | |
7027 | ||
7028 | bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately | |
7029 | mtmsr r29 ; Restore 'rupts, translation | |
7030 | isync ; Throw a small wrench into the pipeline | |
7031 | b gsuPopFrame ; Nothing to do now but pop a frame and return | |
7032 | gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode | |
7033 | gsuPopFrame: | |
7034 | lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
7035 | ; Get caller's return address | |
7036 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
7037 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
7038 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
7039 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
7040 | mtlr r0 ; Prepare return address | |
7041 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
7042 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
7043 | lwz r1,0(r1) ; Pop stack frame | |
7044 | blr ; Return to caller | |
7045 | ||
7046 | ; | |
7047 | ; Guest shadow assist -- test guest mapping reference and change bits | |
7048 | ; | |
7049 | ; Locates the specified guest mapping, and if it exists gathers its reference | |
7050 |