/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>

            .text

;
;            0        0        1        2        3        4        4        5        6
;            0        8        6        4        2        0        8        6        3
;            +--------+--------+--------+--------+--------+--------+--------+--------+
;            |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx|  - EA
;            +--------+--------+--------+--------+--------+--------+--------+--------+
;
;            0        0        1
;            0        8        6
;            +--------+--------+--------+
;            |//////BB|BBBBBBBB|BBBB////|  - SID - base
;            +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;            +--------+--------+--------+
;            |////////|11111111|111111//|  - SID - copy 1
;            +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;            +--------+--------+--------+
;            |////////|//222222|22222222|  - SID - copy 2
;            +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;            +--------+--------+--------+
;            |//////33|33333333|33//////|  - SID - copy 3 - not needed
;            +--------+--------+--------+    for 65 bit VPN
;
;            0        0        1        2        3        4        4  5   5
;            0        8        6        4        2        0        8  1   5
;            +--------+--------+--------+--------+--------+--------+--------+
;            |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////|  - SID Hash - this is all
;            +--------+--------+--------+--------+--------+--------+--------+    SID copies ORed
;
;            0        0        1        2        3        4        4  5   5
;            0        8        6        4        2        0        8  1   5
;            +--------+--------+--------+--------+--------+--------+--------+
;            |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////|  - Shifted high order EA
;            +--------+--------+--------+--------+--------+--------+--------+    left shifted "segment"
;                                                                                part of EA to make
;                                                                                room for SID base
;
;
;            0        0        1        2        3        4        4  5   5
;            0        8        6        4        2        0        8  1   5
;            +--------+--------+--------+--------+--------+--------+--------+
;            |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|  - VSID - SID Hash XORed
;            +--------+--------+--------+--------+--------+--------+--------+    with shifted EA
;
;            0        0        1        2        3        4        4        5        6        7       7
;            0        8        6        4        2        0        8        6        4        2       9
;            +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;            |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx|  - VPN
;            +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;

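/*
 *	For orientation only: a hedged C sketch of the arithmetic pictured
 *	above.  The space ID is replicated at several shifts, the copies are
 *	ORed into a "SID hash", and that hash is XORed with the left-shifted
 *	"segment" portion of the EA to form the VSID; the VSID concatenated
 *	with the page index forms the VPN.  The field widths and shift
 *	amounts below are read off the diagrams and are illustrative, not
 *	lifted from the kernel headers.
 *
 *		#include <stdint.h>
 *
 *		static uint64_t vpn_sketch(uint64_t ea, uint64_t sid) {
 *			// OR the SID base with shifted copies 1 and 2 (shifts assumed)
 *			uint64_t hash = sid | (sid << 10) | (sid << 22);
 *			// left-shift the "segment" part of the EA over the SID base
 *			uint64_t seg  = (ea >> 28) << 12;
 *			// VSID = SID hash XOR shifted EA, masked to the pictured V field
 *			uint64_t vsid = (hash ^ seg) & ((1ULL << 37) - 1);
 *			// VPN = VSID concatenated with the 16-bit page index (offset dropped)
 *			return (vsid << 16) | ((ea >> 12) & 0xFFFF);
 *		}
 */
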
/* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 *  Maps a page or block into a pmap
 *
 *  Returns 0 if add worked or the vaddr of the first overlap if not
 *
 *  Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
 *
 *   1) bump mapping busy count
 *   2) lock pmap share
 *   3) find mapping full path - finds all possible list previous elements
 *   4) upgrade pmap to exclusive
 *   5) add mapping to search list
 *   6) find physent
 *   7) lock physent
 *   8) add to physent
 *   9) unlock physent
 *  10) unlock pmap
 *  11) drop mapping busy count
 *
 *
 *  Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
 *
 *   1) bump mapping busy count
 *   2) lock pmap share
 *   3) find mapping full path - finds all possible list previous elements
 *   4) upgrade pmap to exclusive
 *   5) add mapping to search list
 *   6) unlock pmap
 *   7) drop mapping busy count
 *
 */
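
/*
 *	A hedged C sketch of the non-block add path listed above, for
 *	orientation only.  The helpers are hypothetical stand-ins named
 *	after the assembly routines used below (sxlkShared, mapSearchFull,
 *	mapInsert, mapPhysFindLock, ...); their C signatures are
 *	assumptions, and only the happy path (promotion succeeds) is shown.
 *
 *		addr64_t hw_add_map_sketch(struct pmap *pmap, struct mapping *mp) {
 *			mapBumpBusy(mp);                        //  1) bump mapping busy count
 *			if (sxlkShared(&pmap->pmapSXlk))        //  2) lock pmap share
 *				return mapRtBadLk;                  //     lock timed out
 *			struct mapping *ovl =
 *			    mapSearchFull(pmap, mp->mpVAddr);   //  3) find mapping full path
 *			if (ovl != NULL) {                      //     collision: dup, smash, or
 *				sxlkUnlock(&pmap->pmapSXlk);        //     remove-in-progress
 *				return ovl->mpVAddr | mapRtMapDup;  //     (one case shown)
 *			}
 *			sxlkPromote(&pmap->pmapSXlk);           //  4) upgrade pmap to exclusive
 *			mapInsert(pmap, mp);                    //  5) add mapping to search list
 *			struct phys_entry *pp =
 *			    mapPhysFindLock(mp);                //  6/7) find and lock physent
 *			mapPhysAdd(pp, mp);                     //  8) add to physent alias chain
 *			mapPhysUnlock(pp);                      //  9) unlock physent
 *			sxlkUnlock(&pmap->pmapSXlk);            // 10) unlock pmap
 *			mapDropBusy(mp);                        // 11) drop mapping busy count
 *			return 0;                               //     0 means the add worked
 *		}
 */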

            .align  5
            .globl  EXT(hw_add_map)

LEXT(hw_add_map)

            stwu    r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1)  ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r17,FM_ARG0+0x00(r1)        ; Save a register
            stw     r18,FM_ARG0+0x04(r1)        ; Save a register
            stw     r19,FM_ARG0+0x08(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r21,FM_ARG0+0x10(r1)        ; Save a register
            mtcrf   0x02,r19                    ; Move pf64Bit to cr6
            stw     r22,FM_ARG0+0x14(r1)        ; Save a register
            stw     r23,FM_ARG0+0x18(r1)        ; Save a register
            stw     r24,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r25,FM_ARG0+0x20(r1)        ; Save a register
            stw     r26,FM_ARG0+0x24(r1)        ; Save a register
            stw     r27,FM_ARG0+0x28(r1)        ; Save a register
            stw     r28,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r29,FM_ARG0+0x30(r1)        ; Save a register
            stw     r30,FM_ARG0+0x34(r1)        ; Save a register
            stw     r31,FM_ARG0+0x38(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

#if DEBUG
            lwz     r11,pmapFlags(r3)           ; Get pmap's flags
            rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
            bne     hamPanic                    ; Call not valid for guest shadow assist pmap
#endif

            rlwinm  r11,r4,0,0,19               ; Round down to get mapping block address
            mr      r28,r3                      ; Save the pmap
            mr      r31,r4                      ; Save the mapping
            bt++    pf64Bitb,hamSF1             ; Skip if 64-bit (only they take the hint)
            lwz     r20,pmapvr+4(r3)            ; Get conversion mask for pmap
            lwz     r21,mbvrswap+4(r11)         ; Get conversion mask for mapping

            b       hamSF1x                     ; Done...

hamSF1:     ld      r20,pmapvr(r3)              ; Get conversion mask for pmap
            ld      r21,mbvrswap(r11)           ; Get conversion mask for mapping

hamSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r17,r11                     ; Save the MSR
            xor     r28,r28,r20                 ; Convert the pmap to physical addressing
            xor     r31,r31,r21                 ; Convert the mapping to physical addressing

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            lwz     r24,mpFlags(r31)            ; Pick up the flags
            bne--   hamBadLock                  ; Nope...

            li      r21,0                       ; Remember that we have the shared lock

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements, so we can dequeue
;           them later.
;

hamRescan:  lwz     r4,mpVAddr(r31)             ; Get the new vaddr top half
            lwz     r5,mpVAddr+4(r31)           ; Get the new vaddr bottom half
            mr      r3,r28                      ; Pass in pmap to search
            lhz     r23,mpBSize(r31)            ; Get the block size for later
            mr      r29,r4                      ; Save top half of vaddr for later
            mr      r30,r5                      ; Save bottom half of vaddr for later

            bl      EXT(mapSearchFull)          ; Go see if we can find it

            li      r22,lo16(0x800C)            ; Get 0xFFFF800C
            rlwinm  r0,r24,mpBSub+1,31,31       ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r23,r23,1                   ; Get actual length
            rlwnm   r22,r22,r0,27,31            ; Rotate to get 12 or 25
            lis     r0,0x8000                   ; Get 0xFFFFFFFF80000000
            slw     r9,r23,r22                  ; Isolate the low part
            rlwnm   r22,r23,r22,22,31           ; Extract the high order
            addic   r23,r9,-4096                ; Get the length to the last page
            add     r0,r0,r0                    ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
            addme   r22,r22                     ; Do high order as well...
            mr.     r3,r3                       ; Did we find a mapping here?
            or      r0,r30,r0                   ; Fill high word of 64-bit with 1s so we will properly carry
            bne--   hamOverlay                  ; We found a mapping, this is no good, can not double map...

            addc    r9,r0,r23                   ; Add size to get last page in new range
            or.     r0,r4,r5                    ; Are we beyond the end?
            adde    r8,r29,r22                  ; Add the rest of the length on
            rlwinm  r9,r9,0,0,31                ; Clean top half of sum
            beq++   hamFits                     ; We are at the end...

            cmplw   cr1,r9,r5                   ; Is the bottom part of our end less?
            cmplw   r8,r4                       ; Is our end before the next (top part)?
            crand   cr0_eq,cr0_eq,cr1_lt        ; Is the second half less and the first half equal?
            cror    cr0_eq,cr0_eq,cr0_lt        ; Or is the top half less?

            bf--    cr0_eq,hamOverlay           ; No, we do not fit, there is an overlay...

;
;           Here we try to convert to an exclusive lock.  This will fail if
;           someone else has it shared.
;
hamFits:    mr.     r21,r21                     ; Do we already have the exclusive lock?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock

            bne--   hamGotX                     ; We already have the exclusive...

            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hamGotX                     ; Yeah...

;
;           Since we could not promote our lock, we need to convert to it.
;           That means that we drop the shared lock and wait to get it
;           exclusive.  Since we release the lock, we need to do the look up
;           again.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hamBadLock                  ; Nope, we must have timed out...

            li      r21,1                       ; Remember that we have the exclusive lock
            b       hamRescan                   ; Go look again...
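
/*
 *	The promote/convert dance above is a classic reader-writer upgrade:
 *	try the atomic shared-to-exclusive promotion first, and only if
 *	another sharer blocks it, drop the lock, reacquire it exclusive,
 *	and redo the lookup.  A hedged C sketch (helper signatures assumed):
 *
 *		int exclusive = 0;
 *		for (;;) {
 *			if (mapSearchFull(pmap, va) != NULL)
 *				return overlap;                 // found a collision
 *			if (exclusive ||
 *			    sxlkPromote(&pmap->pmapSXlk) == 0)
 *				break;                          // list never unlocked: no rescan
 *			if (sxlkConvert(&pmap->pmapSXlk) != 0)
 *				return mapRtBadLk;              // must have timed out
 *			exclusive = 1;                      // lock was dropped: rescan
 *		}
 */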

            .align  5

hamGotX:    mr      r3,r28                      ; Get the pmap to insert into
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapInsert)              ; Insert the mapping into the list

            rlwinm  r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
            lhz     r8,mpSpace(r31)             ; Get the address space
            lwz     r11,lgpPcfg(r11)            ; Get the page config
            mfsdr1  r7                          ; Get the hash table base/bounds
            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count

            andi.   r0,r24,mpType               ; Is this a normal mapping?

            rlwimi  r8,r8,14,4,17               ; Double address space
            rlwinm  r9,r30,0,4,31               ; Clear segment
            rlwinm  r10,r30,18,14,17            ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
            rlwimi  r8,r8,28,0,3                ; Get the last nybble of the hash
            rlwimi  r10,r29,18,0,13             ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
            rlwinm  r7,r7,0,16,31               ; Isolate length mask (or count)
            addi    r4,r4,1                     ; Bump up the mapped page count
            srw     r9,r9,r11                   ; Isolate just the page index
            xor     r10,r10,r8                  ; Calculate the low 32 bits of the VSID
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            xor     r9,r9,r10                   ; Get the hash to the PTEG

            bne--   hamDoneNP                   ; Not a normal mapping, therefore, no physent...

            bl      mapPhysFindLock             ; Go find and lock the physent

            bt++    pf64Bitb,ham64              ; This is 64-bit...

            lwz     r11,ppLink+4(r3)            ; Get the alias chain pointer
            rlwinm  r7,r7,16,0,15               ; Get the PTEG wrap size
            slwi    r9,r9,6                     ; Make PTEG offset
            ori     r7,r7,0xFFC0                ; Stick in the bottom part
            rlwinm  r12,r11,0,~ppFlags          ; Clean it up
            and     r9,r9,r7                    ; Wrap offset into table
            mr      r4,r31                      ; Set the link to install
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            stw     r12,mpAlias+4(r31)          ; Move to the mapping
            bl      mapPhyCSet32                ; Install the link
            b       hamDone                     ; Go finish up...

            .align  5

ham64:      li      r0,ppLFAmask                ; Get mask to clean up alias pointer
            subfic  r7,r7,46                    ; Get number of leading zeros
            eqv     r4,r4,r4                    ; Get all ones
            ld      r11,ppLink(r3)              ; Get the alias chain pointer
            rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
            srd     r4,r4,r7                    ; Get the wrap mask
            sldi    r9,r9,7                     ; Change hash to PTEG offset
            andc    r11,r11,r0                  ; Clean out the lock and flags
            and     r9,r9,r4                    ; Wrap to PTEG
            mr      r4,r31
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            std     r11,mpAlias(r31)            ; Set the alias pointer in the mapping

            bl      mapPhyCSet64                ; Install the link

hamDone:    bl      mapPhysUnlock               ; Unlock the physent chain

hamDoneNP:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            mr      r3,r31                      ; Get the mapping pointer
            bl      mapDropBusy                 ; Drop the busy count

            li      r3,0                        ; Set successful return
            li      r4,0                        ; Set successful return

hamReturn:  bt++    pf64Bitb,hamR64             ; 64-bit machine?

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hamReturnC                  ; Join common...

hamR64:     mtmsrd  r17                         ; Restore enables/translation/etc.
            isync

hamReturnC: lwz     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)  ; Get the return
            lwz     r17,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r19,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x0C(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r21,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r22,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack

            blr                                 ; Leave...

            .align  5

hamOverlay: lwz     r22,mpFlags(r3)             ; Get the overlay flags
            li      r0,mpC|mpR                  ; Get a mask to turn off RC bits
            lwz     r23,mpFlags(r31)            ; Get the requested flags
            lwz     r20,mpVAddr(r3)             ; Get the overlay address
            lwz     r8,mpVAddr(r31)             ; Get the requested address
            lwz     r21,mpVAddr+4(r3)           ; Get the overlay address
            lwz     r9,mpVAddr+4(r31)           ; Get the requested address
            lhz     r10,mpBSize(r3)             ; Get the overlay length
            lhz     r11,mpBSize(r31)            ; Get the requested length
            lwz     r24,mpPAddr(r3)             ; Get the overlay physical address
            lwz     r25,mpPAddr(r31)            ; Get the requested physical address
            andc    r21,r21,r0                  ; Clear RC bits
            andc    r9,r9,r0                    ; Clear RC bits

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            rlwinm. r0,r22,0,mpRIPb,mpRIPb      ; Are we in the process of removing this one?
            mr      r3,r20                      ; Save the top of the colliding address
            rlwinm  r4,r21,0,0,19               ; Save the bottom of the colliding address

            bne++   hamRemv                     ; Removing, go say so, so we can help...

            cmplw   r20,r8                      ; High part of vaddr the same?
            cmplw   cr1,r21,r9                  ; Low part?
            crand   cr5_eq,cr0_eq,cr1_eq        ; Remember if same

            cmplw   r10,r11                     ; Size the same?
            cmplw   cr1,r24,r25                 ; Physical address?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Remember
            crand   cr5_eq,cr5_eq,cr1_eq        ; Remember if same

            xor     r23,r23,r22                 ; Compare mapping flag words
            andi.   r23,r23,mpType|mpPerm       ; Are mapping types and attributes the same?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Merge in final check
            bf--    cr5_eq,hamSmash             ; This is not the same, so we return a smash...

            ori     r4,r4,mapRtMapDup           ; Set duplicate
            b       hamReturn                   ; And leave...

hamRemv:    ori     r4,r4,mapRtRemove           ; We are in the process of removing the collision
            b       hamReturn                   ; Come back, y'all...

hamSmash:   ori     r4,r4,mapRtSmash            ; Tell caller that it has some clean up to do
            b       hamReturn                   ; Join common epilog code

            .align  5

hamBadLock: li      r3,0                        ; Set lock time out error code
            li      r4,mapRtBadLk               ; Set lock time out error code
            b       hamReturn                   ; Leave....

hamPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc


/*
 *	mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *	Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *	a 64-bit quantity, it is a long long, so it is in R4 and R5.
 *
 *	We return the virtual address of the removed mapping in R3.
 *
 *	Note that this is designed to be called from 32-bit mode with a stack.
 *
 *	We disable translation and all interruptions here.  This keeps us
 *	from having to worry about a deadlock due to having anything locked
 *	and needing it to process a fault.
 *
 *	Note that this must be done with both interruptions off and VM off.
 *
 *	Remove mapping via pmap, regular page, no pte
 *
 *	 1) lock pmap share
 *	 2) find mapping full path - finds all possible list previous elements
 *	 3) upgrade pmap to exclusive
 *	 4) bump mapping busy count
 *	 5) remove mapping from search list
 *	 6) unlock pmap
 *	 7) lock physent
 *	 8) remove from physent
 *	 9) unlock physent
 *	10) drop mapping busy count
 *	11) drain mapping busy count
 *
 *
 *	Remove mapping via pmap, regular page, with pte
 *
 *	 1) lock pmap share
 *	 2) find mapping full path - finds all possible list previous elements
 *	 3) upgrade lock to exclusive
 *	 4) bump mapping busy count
 *	 5) lock PTEG
 *	 6) invalidate pte and tlbie
 *	 7) atomic merge rc into physent
 *	 8) unlock PTEG
 *	 9) remove mapping from search list
 *	10) unlock pmap
 *	11) lock physent
 *	12) remove from physent
 *	13) unlock physent
 *	14) drop mapping busy count
 *	15) drain mapping busy count
 *
 *
 *	Remove mapping via pmap, I/O or block
 *
 *	 1) lock pmap share
 *	 2) find mapping full path - finds all possible list previous elements
 *	 3) upgrade lock to exclusive
 *	 4) bump mapping busy count
 *	 5) mark remove-in-progress
 *	 6) check and bump remove chunk cursor if needed
 *	 7) unlock pmap
 *	 8) if something to invalidate, go to step 11
 *	 9) drop busy
 *	10) return with mapRtRemove to force higher level to call again
 *	11) lock PTEG
 *	12) invalidate ptes, no tlbie
 *	13) unlock PTEG
 *	14) repeat 11 - 13 for all pages in chunk
 *	15) if not final chunk, go to step 9
 *	16) invalidate tlb entries for the whole block map but no more than the full tlb
 *	17) lock pmap share
 *	18) find mapping full path - finds all possible list previous elements
 *	19) upgrade lock to exclusive
 *	20) remove mapping from search list
 *	21) drop mapping busy count
 *	22) drain mapping busy count
 *
 */
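
/*
 *	A hedged C sketch of the "regular page, with pte" removal path, for
 *	orientation only.  Helper names follow the assembly labels below;
 *	the C signatures, and the combined upgrade helper, are assumptions.
 *
 *		addr64_t hw_rem_map_sketch(struct pmap *pmap, addr64_t va,
 *		                           addr64_t *next) {
 *			if (sxlkShared(&pmap->pmapSXlk))    //  1) lock pmap share
 *				return mapRtBadLk;
 *			struct mapping *mp =
 *			    mapSearchFull(pmap, va, next);  //  2) find mapping full path
 *			if (mp == NULL) {
 *				sxlkUnlock(&pmap->pmapSXlk);
 *				return mapRtNotFnd;             //     no mapping found
 *			}
 *			sxlkUpgrade(&pmap->pmapSXlk);       //  3) upgrade lock to exclusive
 *			mapBumpBusy(mp);                    //  4) bump mapping busy count
 *			ptegLock(mp);                       //  5) lock PTEG
 *			pteInvalidateTlbie(mp);             //  6) invalidate pte and tlbie
 *			physentMergeRC(mp);                 //  7) atomic merge rc into physent
 *			ptegUnlock(mp);                     //  8) unlock PTEG
 *			mapRemove(pmap, mp);                //  9) remove from search list
 *			sxlkUnlock(&pmap->pmapSXlk);        // 10) unlock pmap
 *			physentUnchain(mp);                 // 11-13) lock, remove, unlock physent
 *			mapDropBusy(mp);                    // 14) drop mapping busy count
 *			mapDrainBusy(mp);                   // 15) drain mapping busy count
 *			return mp->mpVAddr;                 // removed mapping's vaddr
 *		}
 */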

            .align  5
            .globl  EXT(hw_rem_map)

LEXT(hw_rem_map)

;
;           NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;           THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4

            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)  ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            mtcrf   0x02,r19                    ; Move pf64Bit to cr6
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

#if DEBUG
            lwz     r11,pmapFlags(r3)           ; Get pmap's flags
            rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
            bne     hrmPanic                    ; Call not valid for guest shadow assist pmap
#endif

            bt++    pf64Bitb,hrmSF1             ; Skip if 64-bit (only they take the hint)
            lwz     r9,pmapvr+4(r3)             ; Get conversion mask
            b       hrmSF1x                     ; Done...

hrmSF1:     ld      r9,pmapvr(r3)               ; Get conversion mask

hrmSF1x:
            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            xor     r28,r3,r9                   ; Convert the pmap to physical addressing

;
;           Here is where we join in from the hw_purge_* routines
;

hrmJoin:    lwz     r3,pmapFlags(r28)           ; Get pmap's flags
            mfsprg  r19,2                       ; Get feature flags again (for alternate entries)

            mr      r17,r11                     ; Save the MSR
            mr      r29,r4                      ; Top half of vaddr
            mr      r30,r5                      ; Bottom half of vaddr

            rlwinm. r3,r3,0,pmapVMgsaa          ; Is guest shadow assist active?
            bne--   hrmGuest                    ; Yes, handle specially

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hrmBadLock                  ; Nope...

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements, so we can dequeue
;           them later.  Note: we get back mpFlags in R7.
;

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            andi.   r0,r7,mpPerm                ; Mapping marked permanent?
            crmove  cr5_eq,cr0_eq               ; Remember permanent marking
            mr      r20,r7                      ; Remember mpFlags
            mr.     r31,r3                      ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Nope, not found...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...
;
;           Here we try to promote to an exclusive lock.  This will fail if
;           someone else has it shared.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hrmGotX                     ; Yeah...

;
;           Since we could not promote our lock, we need to convert to it.
;           That means that we drop the shared lock and wait to get it
;           exclusive.  Since we release the lock, we need to do the look up
;           again.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmBadLock                  ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            andi.   r0,r7,mpPerm                ; Mapping marked permanent?
            crmove  cr5_eq,cr0_eq               ; Remember permanent marking
            mr.     r31,r3                      ; Did we lose it when we converted?
            mr      r20,r7                      ; Remember mpFlags
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Yeah, we did, someone tossed it for us...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...

;
;           We have an exclusive lock on the mapping chain.  And we
;           also have the busy count bumped in the mapping so it can
;           not vanish on us.
;

hrmGotX:    mr      r3,r31                      ; Get the mapping
            bl      mapBumpBusy                 ; Bump up the busy count

;
;           Invalidate any PTEs associated with this
;           mapping (more than one if a block) and accumulate the reference
;           and change bits.
;
;           Here is also where we need to split 32- and 64-bit processing
;

            lwz     r21,mpPte(r31)              ; Grab the offset to the PTE
            rlwinm  r23,r29,0,1,0               ; Copy high order vaddr to high if 64-bit machine
            mfsdr1  r29                         ; Get the hash table base and size

            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr5,r0,mpBlock              ; Remember whether this is a block mapping
            cmplwi  r0,mpMinSpecial             ; cr0_lt <- not a special mapping type

            rlwinm  r0,r21,0,mpHValidb,mpHValidb  ; See if we actually have a PTE
            ori     r2,r2,0xFFFF                ; Get mask to clean out hash table base (works for both 32- and 64-bit)
            cmpwi   cr1,r0,0                    ; Have we made a PTE for this yet?
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            crorc   cr0_eq,cr1_eq,cr0_lt        ; No need to look at PTE if none or a special mapping
            rlwimi  r23,r30,0,0,31              ; Insert low under high part of address
            andc    r29,r29,r2                  ; Clean up hash table base
            li      r22,0                       ; Clear this on out (also sets RC to 0 if we bail)
            mr      r30,r23                     ; Move the now merged vaddr to the correct register
            add     r26,r29,r21                 ; Point to the PTEG slot

            bt++    pf64Bitb,hrmSplit64         ; Go do 64-bit version...

            rlwinm  r9,r21,28,4,29              ; Convert PTEG to PCA entry
            beq-    cr5,hrmBlock32              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt-     cr0_eq,hrmPysDQ32           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG (Note: we need to save R6 to set PCA)

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            lwz     r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb  ; See if we actually have a PTE
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            rlwinm  r5,r5,0,1,31                ; Turn off valid bit in PTE
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
            beq-    hrmUlckPCA32                ; Pte is gone, no need to invalidate...

            stw     r5,0(r26)                   ; Invalidate the PTE

            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb32:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne-    hrmPtlb32                   ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne-    hrmPtlb32                   ; We was beat...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb  ; Can this processor do SMP?

            tlbie   r30                         ; Invalidate all corresponding TLB entries

            beq-    hrmNTlbs                    ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Make sure of it all

hrmNTlbs:   li      r0,0                        ; Clear this
            rlwinm  r2,r21,29,29,31             ; Get slot number (8 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            lis     r0,0x8000                   ; Get bit for slot 0
            eieio                               ; Make sure those RC bits have been stashed in PTE

            srw     r0,r0,r2                    ; Get the allocation hash mask
            lwz     r22,4(r26)                  ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Show that this slot is free

hrmUlckPCA32:
            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock the PTEG

;
;           Now, it is time to remove the mapping and unlock the chain.
;           But first, we need to make sure no one else is using this
;           mapping, so we drain the busy now
;

hrmPysDQ32: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr1,r0,mpMinSpecial         ; cr1_lt <- not a special mapping type
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bf--    cr1_lt,hrmRetn32            ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            lwz     r9,ppLink+4(r3)             ; Get first mapping

            mr      r4,r22                      ; Get the RC bits we just got
            bl      mapPhysMerge                ; Go merge the RC bits

            rlwinm  r9,r9,0,~ppFlags            ; Clear the flags from the mapping pointer

            cmplw   r9,r31                      ; Are we the first on the list?
            bne-    hrmNot1st                   ; Nope...

            li      r9,0                        ; Get a 0
            lwz     r4,mpAlias+4(r31)           ; Get our new forward pointer
            stw     r9,mpAlias+4(r31)           ; Make sure we are off the chain
            bl      mapPhyCSet32                ; Go set the physent link and preserve flags

            b       hrmPhyDQd                   ; Join up and unlock it all...

            .align  5

hrmPerm:    li      r8,-4096                    ; Get the value we need to round down to a page
            and     r8,r8,r31                   ; Get back to a page
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            xor     r3,r31,r8                   ; Flip mapping address to virtual
            ori     r3,r3,mapRtPerm             ; Set permanent mapping error
            b       hrmErRtn

hrmBadLock: li      r3,mapRtBadLk               ; Set bad lock
            b       hrmErRtn

hrmEndInSight:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

hrmDoneChunk:
            mr      r3,r31                      ; Point to the mapping
            bl      mapDropBusy                 ; Drop the busy here since we need to come back
            li      r3,mapRtRemove              ; Say we are still removing this
            b       hrmErRtn

            .align  5

hrmNotFound:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            li      r3,mapRtNotFnd              ; No mapping found

hrmErRtn:   bt++    pf64Bitb,hrmSF1z            ; Skip if 64-bit (only they take the hint)

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...

hrmSF1z:    mtmsrd  r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...

            .align  5

hrmNot1st:  mr.     r8,r9                       ; Remember and test current node
            beq-    hrmPhyDQd                   ; Could not find our node, someone must have unmapped us...
            lwz     r9,mpAlias+4(r9)            ; Chain to the next
            cmplw   r9,r31                      ; Is this us?
            bne-    hrmNot1st                   ; Not us...

            lwz     r9,mpAlias+4(r9)            ; Get our forward pointer
            stw     r9,mpAlias+4(r8)            ; Unchain us

            nop                                 ; For alignment

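/*
 *	hrmNot1st above is a plain singly linked list unlink on the physent
 *	alias chain.  A hedged C equivalent (the next()/setNext() accessors
 *	stand in for the mpAlias+4 loads and stores; the layout is an
 *	assumption):
 *
 *		struct mapping *prev = first;       // first mapping on the physent
 *		while (prev != NULL && next(prev) != mp)
 *			prev = next(prev);              // chain to the next
 *		if (prev != NULL)                   // found our predecessor...
 *			setNext(prev, next(mp));        // ...so unchain us
 */
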
hrmPhyDQd:  bl      mapPhysUnlock               ; Unlock the physent chain

hrmRetn32:  rlwinm  r8,r31,0,0,19               ; Find start of page
            mr      r3,r31                      ; Copy the pointer to the mapping
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap
            bl      mapDrainBusy                ; Go wait until mapping is unused

            xor     r3,r31,r8                   ; Flip mapping address to virtual

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync

hrmRetnCmn: lwz     r6,FM_ARG0+0x44(r1)         ; Get address to save next mapped vaddr
            lwz     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Restore the return
            lwz     r17,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x0C(r1)        ; Restore a register
            mr.     r6,r6                       ; Should we pass back the "next" vaddr?
            lwz     r19,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x14(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return

            rlwinm  r16,r16,0,0,19              ; Clean to a page boundary
            beq     hrmNoNextAdr                ; Do not pass back the next vaddr...
            stw     r15,0(r6)                   ; Pass back the top of the next vaddr
            stw     r16,4(r6)                   ; Pass back the bottom of the next vaddr

hrmNoNextAdr:
            lwz     r15,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r16,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r21,FM_ARG0+0x18(r1)        ; Restore a register
            rlwinm  r3,r3,0,0,31                ; Clear top of register if 64-bit
            lwz     r22,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x3C(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x40(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...

;
;           Here is where we come when all is lost.  Somehow, we failed a mapping function
;           that must work...  All hope is gone.  Alas, we die.......
;

hrmPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc


;
;           Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;           in the range.  Then, if we did not finish, return a code indicating that we need
;           to be called again.  Eventually, we will finish, and then we will do a TLBIE for
;           each PTEG up to the point where we have cleared it all (64 for the 32-bit
;           architecture).
;
;           A potential speed up is that we stop the invalidate loop once we have walked
;           through the hash table once.  This really is not worth the trouble because we
;           would need to have mapped 1/2 of physical RAM in an individual block.  Way unlikely.
;
;           We should rethink this and see if we think it will be faster to check the PTE and
;           only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;
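
/*
 *	The chunked contract, seen from the caller: for a block or I/O
 *	mapping, hw_rem_map invalidates at most one chunk of autogen PTEs
 *	per call and returns mapRtRemove until the final chunk is done, so
 *	the upper level simply calls again.  A hedged C sketch (hw_rem_map's
 *	C binding here is an assumption):
 *
 *		addr64_t next, rc;
 *		do {
 *			rc = hw_rem_map(pmap, va, &next);   // removes up to one chunk
 *		} while (rc == mapRtRemove);            // not the final chunk yet
 *		// rc is now the removed vaddr, mapRtNotFnd, or mapRtBadLk
 */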

            .align  5

hrmBlock32: lis     r29,0xD000                  ; Get shift to 32MB bsu
            rlwinm  r24,r20,mpBSub+1+2,29,29    ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
            lhz     r25,mpBSize(r31)            ; Get the number of pages in block
            lhz     r23,mpSpace(r31)            ; Get the address space hash
            lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
            rlwnm   r29,r29,r24,28,31           ; Rotate to get 0 or 13
            addi    r25,r25,1                   ; Account for zero-based counting
            ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
            slw     r25,r25,r29                 ; Adjust for 32MB if needed
            mfsdr1  r29                         ; Get the hash table base and size
            rlwinm  r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb  ; Get high order of hash
            subi    r25,r25,1                   ; Convert back to zero-based counting
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr
            sub     r4,r25,r9                   ; Get number of pages left
            cmplw   cr1,r9,r25                  ; Have we already hit the end?
            addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
            addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
            rlwinm  r26,r29,16,7,15             ; Get the hash table size
            srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
            stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
            subi    r4,r4,mapRemChunk-1         ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi   cr7,r2,0                    ; Remember if we have finished
            slwi    r0,r9,12                    ; Make cursor into page offset
            or      r24,r24,r23                 ; Get full hash
            and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
            rlwinm  r29,r29,0,0,15              ; Isolate the hash table base
            add     r27,r27,r0                  ; Adjust vaddr to start of current chunk
            addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get max(num left, chunksize)

            bgt-    cr1,hrmEndInSight           ; Someone is already doing the last chunk...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
            bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

            rlwinm  r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb  ; Isolate the segment
            rlwinm  r30,r27,26,6,25             ; Shift vaddr to PTEG offset (and remember VADDR in R27)
            xor     r24,r24,r8                  ; Get the proper VSID
            rlwinm  r21,r27,26,10,25            ; Shift page index to PTEG offset (and remember VADDR in R27)
            ori     r26,r26,lo16(0xFFC0)        ; Stick in the rest of the length
            rlwinm  r22,r4,6,10,25              ; Shift size to PTEG offset
            rlwinm  r24,r24,6,0,25              ; Shift hash to PTEG units
            add     r22,r22,r30                 ; Get end address (in PTEG units)

hrmBInv32:  rlwinm  r23,r30,0,10,25             ; Isolate just the page index
            xor     r23,r23,r24                 ; Hash it
            and     r23,r23,r26                 ; Wrap it into the table
            rlwinm  r3,r23,28,4,29              ; Change to PCA offset
            subfic  r3,r3,-4                    ; Get the PCA entry offset
            add     r7,r3,r29                   ; Point to the PCA slot
            cmplw   cr5,r30,r22                 ; Check if we reached the end of the range
            addi    r30,r30,64                  ; Bump to the next vaddr

            bl      mapLockPteg                 ; Lock the PTEG

            rlwinm. r4,r6,16,0,7                ; Position, save, and test block mappings in PCA
            add     r5,r23,r29                  ; Point to the PTEG
            li      r0,0                        ; Set an invalid PTE value
            beq+    hrmBNone32                  ; No block map PTEs in this PTEG...
            mtcrf   0x80,r4                     ; Set CRs to select PTE slots
            mtcrf   0x40,r4                     ; Set CRs to select PTE slots

            bf      0,hrmSlot0                  ; No autogen here
            stw     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0:   bf      1,hrmSlot1                  ; No autogen here
            stw     r0,0x08(r5)                 ; Invalidate PTE

hrmSlot1:   bf      2,hrmSlot2                  ; No autogen here
            stw     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot2:   bf      3,hrmSlot3                  ; No autogen here
            stw     r0,0x18(r5)                 ; Invalidate PTE

hrmSlot3:   bf      4,hrmSlot4                  ; No autogen here
            stw     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot4:   bf      5,hrmSlot5                  ; No autogen here
            stw     r0,0x28(r5)                 ; Invalidate PTE

hrmSlot5:   bf      6,hrmSlot6                  ; No autogen here
            stw     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot6:   bf      7,hrmSlot7                  ; No autogen here
            stw     r0,0x38(r5)                 ; Invalidate PTE

hrmSlot7:   rlwinm  r0,r4,16,16,23              ; Move in use to autogen
            or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
            andc    r6,r6,r0                    ; Turn off all the old autogen bits

hrmBNone32: eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and set the PCA

            bne+    cr5,hrmBInv32               ; Go invalidate the next...

            bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

            mr      r3,r31                      ; Copy the pointer to the mapping
            bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

            sync                                ; Make sure memory is consistent

            subi    r5,r25,63                   ; Subtract TLB size from page count (note we are 0 based here)
            li      r6,63                       ; Assume full invalidate for now
            srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
            andc    r6,r6,r5                    ; Clear max if we have less to do
            and     r5,r25,r5                   ; Clear count if we have more than max
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr again
            li      r7,tlbieLock                ; Get the TLBIE lock
            or      r5,r5,r6                    ; Get number of TLBIEs needed

hrmBTLBlck: lwarx   r2,0,r7                     ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it locked?
            li      r2,1                        ; Get our lock value
            bne-    hrmBTLBlck                  ; It is locked, go wait...
            stwcx.  r2,0,r7                     ; Try to get it
            bne-    hrmBTLBlck                  ; We was beat...

hrmBTLBi:   addic.  r5,r5,-1                    ; See if we did them all
            tlbie   r27                         ; Invalidate it everywhere
            addi    r27,r27,0x1000              ; Up to the next page
            bge+    hrmBTLBi                    ; Make sure we have done it all...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb  ; Can this processor do SMP?
            li      r2,0                        ; Lock clear value

            sync                                ; Make sure all is quiet
            beq-    hrmBNTlbs                   ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Wait for quiet again

hrmBNTlbs:  stw     r2,tlbieLock(0)             ; Clear the tlbie lock

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne-    hrmPanic                    ; Nope...

            lwz     r4,mpVAddr(r31)             ; High order of address
            lwz     r5,mpVAddr+4(r31)           ; Low order of address
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Save this in case we need it (only promote fails)
            mr      r30,r5                      ; Save this in case we need it (only promote fails)
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            mr.     r3,r3                       ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq-    hrmPanic                    ; Nope, not found...

            cmplw   r3,r31                      ; Same mapping?
            bne-    hrmPanic                    ; Not good...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            mr      r3,r31                      ; Restore the mapping pointer
            beq+    hrmBDone1                   ; Yeah...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmPanic                    ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            mr.     r3,r3                       ; Did we lose it when we converted?
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone1:  bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            b       hrmRetn32                   ; We are all done, get out...
1c79356b | 1089 | |
55e303ae A |
1090 | ; |
1091 | ; Here we handle the 64-bit version of hw_rem_map | |
1092 | ; | |
1093 | ||
1c79356b | 1094 | .align 5 |
55e303ae A |
1095 | |
1096 | hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry | |
91447636 | 1097 | beq-- cr5,hrmBlock64 ; Go treat block specially... |
55e303ae A |
1098 | subfic r9,r9,-4 ; Get the PCA entry offset |
1099 | bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE... | |
1100 | add r7,r9,r29 ; Point to the PCA slot | |
1101 | ||
1102 | bl mapLockPteg ; Go lock up the PTEG | |
1103 | ||
1104 | lwz r21,mpPte(r31) ; Get the quick pointer again | |
1105 | ld r5,0(r26) ; Get the top of PTE | |
1106 | ||
1107 | rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE | |
91447636 | 1108 | rlwinm r21,r21,0,~mpHValid ; Clear out valid bit |
55e303ae | 1109 | sldi r23,r5,16 ; Shift AVPN up to EA format |
91447636 | 1110 | // **** Need to adjust above shift based on the page size - large pages need to shift a bit more |
55e303ae A |
1111 | rldicr r5,r5,0,62 ; Clear the valid bit |
1112 | rldimi r23,r30,0,36 ; Insert the page portion of the VPN | |
1113 | stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake) | |
1114 | beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate... | |
1115 | ||
1116 | std r5,0(r26) ; Invalidate the PTE | |
1117 | ||
1118 | li r9,tlbieLock ; Get the TLBIE lock | |
1119 | ||
1120 | sync ; Make sure the invalid PTE is actually in memory | |
1121 | ||
1122 | hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock | |
1123 | rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to | |
1124 | mr. r5,r5 ; Is it locked? | |
1125 | li r5,1 ; Get locked indicator | |
1126 | bne-- hrmPtlb64w ; It is locked, go spin... | |
1127 | stwcx. r5,0,r9 ; Try to get it | |
1128 | bne-- hrmPtlb64 ; We was beat... | |
1129 | ||
91447636 | 1130 | tlbie r23 ; Invalidate all corresponding TLB entries |
1c79356b | 1131 | |
55e303ae A |
1132 | eieio ; Make sure that the tlbie happens first |
1133 | tlbsync ; Wait for everyone to catch up | |
55e303ae A |
1134 | |
1135 | ptesync ; Make sure of it all | |
1136 | li r0,0 ; Clear this | |
1137 | rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries) | |
1138 | stw r0,tlbieLock(0) ; Clear the tlbie lock | |
1139 | oris r0,r0,0x8000 ; Assume slot 0 | |
91447636 | 1140 | |
55e303ae | 1141 | srw r0,r0,r2 ; Get slot mask to deallocate |
d7e50217 | 1142 | |
55e303ae A |
1143 | lwz r22,12(r26) ; Get the latest reference and change bits |
1144 | or r6,r6,r0 ; Make the guy we killed free | |
de355530 | 1145 | |
55e303ae A |
1146 | hrmUlckPCA64: |
1147 | eieio ; Make sure all updates come first | |
1148 | ||
1149 | stw r6,0(r7) ; Unlock and change the PCA | |
1150 | ||
1151 | hrmPysDQ64: mr r3,r31 ; Point to the mapping | |
1152 | bl mapDrainBusy ; Go wait until mapping is unused | |
1153 | ||
91447636 | 1154 | mr r3,r28 ; Get the pmap to remove from |
55e303ae A |
1155 | mr r4,r31 ; Point to the mapping |
1156 | bl EXT(mapRemove) ; Remove the mapping from the list | |
1157 | ||
91447636 A |
1158 | rlwinm r0,r20,0,mpType ; Isolate mapping type |
1159 | cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type | |
55e303ae | 1160 | lwz r4,pmapResidentCnt(r28) ; Get the mapped page count |
55e303ae A |
1161 | la r3,pmapSXlk(r28) ; Point to the pmap search lock |
1162 | subi r4,r4,1 ; Drop down the mapped page count | |
1163 | stw r4,pmapResidentCnt(r28) ; Set the mapped page count | |
1164 | bl sxlkUnlock ; Unlock the search list | |
1165 | ||
91447636 | 1166 | bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done... |
1c79356b | 1167 | |
55e303ae | 1168 | bl mapPhysFindLock ; Go find and lock the physent |
1c79356b | 1169 | |
91447636 | 1170 | li r0,ppLFAmask ; Get mask to clean up mapping pointer |
55e303ae | 1171 | ld r9,ppLink(r3) ; Get first mapping |
91447636 | 1172 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F |
55e303ae | 1173 | mr r4,r22 ; Get the RC bits we just got |
1c79356b | 1174 | |
55e303ae | 1175 | bl mapPhysMerge ; Go merge the RC bits |
d7e50217 | 1176 | |
55e303ae | 1177 | andc r9,r9,r0 ; Clean up the mapping pointer |
d7e50217 | 1178 | |
55e303ae | 1179 | cmpld r9,r31 ; Are we the first on the list? |
91447636 | 1180 | bne-- hrmNot1st64 ; Nope... |
1c79356b | 1181 | |
55e303ae A |
1182 | li r9,0 ; Get a 0 |
1183 | ld r4,mpAlias(r31) ; Get our forward pointer | |
1184 | ||
1185 | std r9,mpAlias(r31) ; Make sure we are off the chain | |
1186 | bl mapPhyCSet64 ; Go set the physent link and preserve flags | |
de355530 | 1187 | |
55e303ae A |
1188 | b hrmPhyDQd64 ; Join up and unlock it all... |
1189 | ||
1190 | hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory | |
1191 | stwcx. r5,0,r5 ; Clear the pending reservation | |
de355530 | 1192 | |
d7e50217 | 1193 | |
55e303ae A |
1194 | hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation |
1195 | mr. r5,r5 ; is it locked? | |
1196 | beq++ hrmPtlb64 ; Nope... | |
1197 | b hrmPtlb64x ; Sniff some more... | |
1198 | ||
1199 | .align 5 | |
1200 | ||
1201 | hrmNot1st64: | |
1202 | mr. r8,r9 ; Remember and test current node | |
91447636 | 1203 | beq-- hrmPhyDQd64 ; Could not find our node... |
55e303ae A |
1204 | ld r9,mpAlias(r9) ; Chain to the next |
1205 | cmpld r9,r31 ; Is this us? | |
91447636 | 1206 | bne-- hrmNot1st64 ; Not us... |
55e303ae A |
1207 | |
1208 | ld r9,mpAlias(r9) ; Get our forward pointer | |
1209 | std r9,mpAlias(r8) ; Unchain us | |
1210 | ||
1211 | nop ; For alignment | |
1212 | ||
1213 | hrmPhyDQd64: | |
1214 | bl mapPhysUnlock ; Unlock the physent chain | |
1c79356b | 1215 | |
55e303ae A |
1216 | hrmRetn64: rldicr r8,r31,0,51 ; Find start of page |
1217 | mr r3,r31 ; Copy the pointer to the mapping | |
1218 | lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap | |
1219 | bl mapDrainBusy ; Go wait until mapping is unused | |
1c79356b | 1220 | |
55e303ae | 1221 | xor r3,r31,r8 ; Flip mapping address to virtual |
d7e50217 | 1222 | |
55e303ae | 1223 | mtmsrd r17 ; Restore enables/translation/etc. |
de355530 | 1224 | isync |
55e303ae A |
1225 | |
1226 | b hrmRetnCmn ; Join the common return path... | |


;
;       Check hrmBlock32 for comments.
;
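;
;       Editor's hedged C-level sketch (an illustration, not part of the original
;       source) of the branchless chunk-size computation in hrmBlock64 below: each
;       pass removes at most mapRemChunk pages, and the srawi/and/addi sequence
;       computes the inclusive page count for this pass without a branch.
;       Variable names here are assumptions:
;
;           int left = last - cur;                  // pages remaining, zero-based
;           int t    = (left - mapRemChunk) >> 31;  // arithmetic shift: -1 if left < mapRemChunk, else 0
;           int n    = ((left - (mapRemChunk - 1)) & t) + (mapRemChunk - 1);
;           // n == mapRemChunk - 1 when a full chunk remains, else n == left;
;           // the invalidate loop then covers pages cur .. cur + n inclusive.
;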

        .align  5

hrmBlock64: lis     r29,0xD000              ; Get shift to 32MB bsu
        rlwinm  r10,r20,mpBSub+1+2,29,29    ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
        lhz     r24,mpSpace(r31)            ; Get the address space hash
        lhz     r25,mpBSize(r31)            ; Get the number of pages in block
        lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
        rlwnm   r29,r29,r10,28,31           ; Rotate to get 0 or 13
        addi    r25,r25,1                   ; Account for zero-based counting
        ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
        slw     r25,r25,r29                 ; Adjust for 32MB if needed
        mfsdr1  r29                         ; Get the hash table base and size
        ld      r27,mpVAddr(r31)            ; Get the base vaddr
        subi    r25,r25,1                   ; Convert back to zero-based counting
        rlwinm  r5,r29,0,27,31              ; Isolate the size
        sub     r4,r25,r9                   ; Get number of pages left
        cmplw   cr1,r9,r25                  ; Have we already hit the end?
        addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
        addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
        stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
        srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
        subi    r4,r4,mapRemChunk-1         ; Back off for a running start (negative if fewer than mapRemChunk left)
        cmpwi   cr7,r2,0                    ; Remember if we are doing the last chunk
        and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
        srdi    r27,r27,12                  ; Change address into page index
        addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get min(pages left, mapRemChunk-1)
        add     r27,r27,r9                  ; Adjust vaddr to start of current chunk

        bgt--   cr1,hrmEndInSight           ; Someone is already doing the last chunk...

        la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
        stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
        bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

        rlwimi  r24,r24,14,4,17             ; Insert a copy of space hash
        eqv     r26,r26,r26                 ; Get all foxes here
        rldimi  r24,r24,28,8                ; Make a couple copies up higher
        rldicr  r29,r29,0,47                ; Isolate just the hash table base
        subfic  r5,r5,46                    ; Get number of leading zeros
        srd     r26,r26,r5                  ; Shift the size bits over
        mr      r30,r27                     ; Get start of chunk to invalidate
        rldicr  r26,r26,0,56                ; Make length in PTEG units
        add     r22,r4,r30                  ; Get end page number

hrmBInv64: srdi    r0,r30,2                 ; Shift page index over to form ESID
        rldicr  r0,r0,0,49                  ; Clean all but segment portion
        rlwinm  r2,r30,0,16,31              ; Get the current page index
        xor     r0,r0,r24                   ; Form VSID
        xor     r8,r2,r0                    ; Hash the vaddr
        sldi    r8,r8,7                     ; Make into PTEG offset
        and     r23,r8,r26                  ; Wrap into the hash table
        rlwinm  r3,r23,27,5,29              ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
        subfic  r3,r3,-4                    ; Get the PCA entry offset
        add     r7,r3,r29                   ; Point to the PCA slot

        cmplw   cr5,r30,r22                 ; Have we reached the end of the range?

        bl      mapLockPteg                 ; Lock the PTEG

        rlwinm. r4,r6,16,0,7                ; Extract the block mappings in this here PTEG and see if there are any
        add     r5,r23,r29                  ; Point to the PTEG
        li      r0,0                        ; Set an invalid PTE value
        beq++   hrmBNone64                  ; No block map PTEs in this PTEG...
        mtcrf   0x80,r4                     ; Set CRs to select PTE slots
        mtcrf   0x40,r4                     ; Set CRs to select PTE slots

        bf      0,hrmSlot0s                 ; No autogen here
        std     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0s: bf      1,hrmSlot1s              ; No autogen here
        std     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot1s: bf      2,hrmSlot2s              ; No autogen here
        std     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot2s: bf      3,hrmSlot3s              ; No autogen here
        std     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot3s: bf      4,hrmSlot4s              ; No autogen here
        std     r0,0x40(r5)                 ; Invalidate PTE

hrmSlot4s: bf      5,hrmSlot5s              ; No autogen here
        std     r0,0x50(r5)                 ; Invalidate PTE

hrmSlot5s: bf      6,hrmSlot6s              ; No autogen here
        std     r0,0x60(r5)                 ; Invalidate PTE

hrmSlot6s: bf      7,hrmSlot7s              ; No autogen here
        std     r0,0x70(r5)                 ; Invalidate PTE

hrmSlot7s: rlwinm  r0,r4,16,16,23           ; Move in use to autogen
        or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
        andc    r6,r6,r0                    ; Turn off all the old autogen bits

hrmBNone64: eieio                           ; Make sure all updates come first
        stw     r6,0(r7)                    ; Unlock and set the PCA

        addi    r30,r30,1                   ; Bump to the next PTEG
        bne++   cr5,hrmBInv64               ; Go invalidate the next...

        bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

        mr      r3,r31                      ; Copy the pointer to the mapping
        bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

        sync                                ; Make sure memory is consistent

        subi    r5,r25,255                  ; Subtract TLB size from page count (note we are 0 based here)
        li      r6,255                      ; Assume full invalidate for now
        srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
        andc    r6,r6,r5                    ; Clear max if we have less to do
        and     r5,r25,r5                   ; Clear count if we have more than max
        sldi    r24,r24,28                  ; Get the full XOR value over to segment position
        ld      r27,mpVAddr(r31)            ; Get the base vaddr
        li      r7,tlbieLock                ; Get the TLBIE lock
        or      r5,r5,r6                    ; Get number of TLBIEs needed

hrmBTLBlcl: lwarx   r2,0,r7                 ; Get the TLBIE lock
        mr.     r2,r2                       ; Is it locked?
        li      r2,1                        ; Get our lock value
        bne--   hrmBTLBlcm                  ; It is locked, go wait...
        stwcx.  r2,0,r7                     ; Try to get it
        bne--   hrmBTLBlcl                  ; We was beat...

hrmBTLBj: sldi    r2,r27,maxAdrSpb          ; Move to make room for address space ID
        rldicr  r2,r2,0,35-maxAdrSpb        ; Clear out the extra
        addic.  r5,r5,-1                    ; See if we did them all
        xor     r2,r2,r24                   ; Make the VSID
        rldimi  r2,r27,0,36                 ; Insert the page portion of the VPN
        rldicl  r2,r2,0,16                  ; Clear bits 0:15 cause they say we gotta

        tlbie   r2                          ; Invalidate it everywhere
        addi    r27,r27,0x1000              ; Up to the next page
        bge++   hrmBTLBj                    ; Make sure we have done it all...

        eieio                               ; Make sure that the tlbie happens first
        tlbsync                             ; Wait for everyone to catch up

        li      r2,0                        ; Lock clear value

        ptesync                             ; Wait for quiet again

        stw     r2,tlbieLock(0)             ; Clear the tlbie lock

        la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
        bl      sxlkShared                  ; Go get a shared lock on the mapping lists
        mr.     r3,r3                       ; Did we get the lock?
        bne-    hrmPanic                    ; Nope...

        lwz     r4,mpVAddr(r31)             ; High order of address
        lwz     r5,mpVAddr+4(r31)           ; Low order of address
        mr      r3,r28                      ; Pass in pmap to search
        mr      r29,r4                      ; Save this in case we need it (only promote fails)
        mr      r30,r5                      ; Save this in case we need it (only promote fails)
        bl      EXT(mapSearchFull)          ; Go see if we can find it

        mr.     r3,r3                       ; Did we? (And remember mapping address for later)
        mr      r15,r4                      ; Save top of next vaddr
        mr      r16,r5                      ; Save bottom of next vaddr
        beq-    hrmPanic                    ; Nope, not found...

        cmpld   r3,r31                      ; Same mapping?
        bne-    hrmPanic                    ; Not good...

        la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
        bl      sxlkPromote                 ; Try to promote shared to exclusive
        mr.     r3,r3                       ; Could we?
        mr      r3,r31                      ; Restore the mapping pointer
        beq+    hrmBDone2                   ; Yeah...

        la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
        bl      sxlkConvert                 ; Convert shared to exclusive
        mr.     r3,r3                       ; Could we?
        bne--   hrmPanic                    ; Nope, we must have timed out...

        mr      r3,r28                      ; Pass in pmap to search
        mr      r4,r29                      ; High order of address
        mr      r5,r30                      ; Low order of address
        bl      EXT(mapSearchFull)          ; Rescan the list

        mr.     r3,r3                       ; Did we lose it when we converted?
        mr      r15,r4                      ; Save top of next vaddr
        mr      r16,r5                      ; Save bottom of next vaddr
        beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone2: bl      mapDrainBusy             ; Go wait until mapping is unused

        mr      r3,r28                      ; Get the pmap to remove from
        mr      r4,r31                      ; Point to the mapping
        bl      EXT(mapRemove)              ; Remove the mapping from the list

        lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
        la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
        subi    r4,r4,1                     ; Drop down the mapped page count
        stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
        bl      sxlkUnlock                  ; Unlock the search list

        b       hrmRetn64                   ; We are all done, get out...

hrmBTLBlcm: li      r2,lgKillResv           ; Get space unreserve line
        stwcx.  r2,0,r2                     ; Unreserve it

hrmBTLBlcn: lwz     r2,0(r7)                ; Get the TLBIE lock
        mr.     r2,r2                       ; Is it held?
        beq++   hrmBTLBlcl                  ; Nope...
        b       hrmBTLBlcn                  ; Yeah...

;
;       Guest shadow assist -- mapping remove
;
;       Method of operation:
;               o Locate the VMM extension block and the host pmap
;               o Obtain the host pmap's search lock exclusively
;               o Locate the requested mapping in the shadow hash table,
;                 exit if not found
;               o If connected, disconnect the PTE and gather R&C to physent
;               o Locate and lock the physent
;               o Remove mapping from physent's chain
;               o Unlock physent
;               o Unlock pmap's search lock
;
;       Non-volatile registers on entry:
;               r17: caller's msr image
;               r19: sprg2 (feature flags)
;               r28: guest pmap's physical address
;               r29: high-order 32 bits of guest virtual address
;               r30: low-order 32 bits of guest virtual address
;
;       Non-volatile register usage:
;               r26: VMM extension block's physical address
;               r27: host pmap's physical address
;               r28: guest pmap's physical address
;               r29: physent's physical address
;               r30: guest virtual address
;               r31: guest mapping's physical address
;
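;
;       Editor's hedged C-level sketch (an illustration, not part of the original
;       source) of the shadow hash probe performed below; the struct and helper
;       names are assumptions standing in for the real definitions:
;
;           guest_mapping *shadow_hash_find(uint32_t space_id, uint64_t va) {
;               uint32_t hash = space_id ^ (uint32_t)(va >> 12);      // spaceID ^ (vaddr >> 12)
;               hash_group *grp = group_for(hash);                    // via the hash page physical index
;               for (unsigned slot = 0; slot < GV_SLOTS; slot++) {
;                   guest_mapping *m = &grp->slots[slot];
;                   if (!(m->flags & mpgFree) &&                      // slot in use
;                       m->space == space_id &&                       // space ID matches
;                       (m->vaddr & ~0xFFFull) == (va & ~0xFFFull))   // page matches
;                       return m;                                     // hit
;               }
;               return NULL;                                          // miss
;           }
;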
        .align  5
hrmGuest:
        rlwinm  r30,r30,0,0xFFFFF000        ; Clean up low-order bits of 32-bit guest vaddr
        bt++    pf64Bitb,hrmG64             ; Test for 64-bit machine
        lwz     r26,pmapVmmExtPhys+4(r28)   ; r26 <- VMM pmap extension block paddr
        lwz     r27,vmxHostPmapPhys+4(r26)  ; r27 <- host pmap's paddr
        b       hrmGStart                   ; Join common code

hrmG64: ld      r26,pmapVmmExtPhys(r28)     ; r26 <- VMM pmap extension block paddr
        ld      r27,vmxHostPmapPhys(r26)    ; r27 <- host pmap's paddr
        rldimi  r30,r29,32,0                ; Insert high-order 32 bits of 64-bit guest vaddr

hrmGStart: la      r3,pmapSXlk(r27)         ; r3 <- host pmap's search lock address
        bl      sxlkExclusive               ; Get lock exclusive

        lwz     r3,vxsGrm(r26)              ; Get mapping remove request count

        lwz     r9,pmapSpace(r28)           ; r9 <- guest space ID number
        la      r31,VMX_HPIDX_OFFSET(r26)   ; r31 <- base of hash page physical index
        srwi    r11,r30,12                  ; Form shadow hash:
        xor     r11,r9,r11                  ;   spaceID ^ (vaddr >> 12)
        rlwinm  r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
                                            ; Form index offset from hash page number
        add     r31,r31,r12                 ; r31 <- hash page index entry
        li      r0,(GV_SLOTS - 1)           ; Prepare to iterate over mapping slots
        mtctr   r0                          ;   in this group
        bt++    pf64Bitb,hrmG64Search       ; Separate handling for 64-bit search
        lwz     r31,4(r31)                  ; r31 <- hash page paddr
        rlwimi  r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
                                            ; r31 <- hash group paddr

        addi    r3,r3,1                     ; Increment remove request count
        stw     r3,vxsGrm(r26)              ; Update remove request count

        lwz     r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
        lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
        lwz     r5,mpVAddr+4(r31)           ; r5 <- 1st mapping slot's virtual address
        b       hrmG32SrchLp                ; Let the search begin!

        .align  5
hrmG32SrchLp:
        mr      r6,r3                       ; r6 <- current mapping slot's flags
        lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
        mr      r7,r4                       ; r7 <- current mapping slot's space ID
        lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
        clrrwi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
        lwz     r5,mpVAddr+4+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
        rlwinm  r11,r6,0,mpgFree            ; Isolate guest free mapping flag
        xor     r7,r7,r9                    ; Compare space ID
        or      r0,r11,r7                   ; r0 <- !(free && space match)
        xor     r8,r8,r30                   ; Compare virtual address
        or.     r0,r0,r8                    ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                 ; Join common path on hit (r31 points to guest mapping)

        addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
        bdnz    hrmG32SrchLp                ; Iterate

        mr      r6,r3                       ; r6 <- current mapping slot's flags
        clrrwi  r5,r5,12                    ; Remove flags from virtual address
        rlwinm  r11,r6,0,mpgFree            ; Isolate guest free mapping flag
        xor     r4,r4,r9                    ; Compare space ID
        or      r0,r11,r4                   ; r0 <- !(free && space match)
        xor     r5,r5,r30                   ; Compare virtual address
        or.     r0,r0,r5                    ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                 ; Join common path on hit (r31 points to guest mapping)
        b       hrmGSrchMiss                ; No joy in our hash group

hrmG64Search:
        ld      r31,0(r31)                  ; r31 <- hash page paddr
        insrdi  r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
                                            ; r31 <- hash group paddr
        lwz     r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
        lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
        ld      r5,mpVAddr(r31)             ; r5 <- 1st mapping slot's virtual address
        b       hrmG64SrchLp                ; Let the search begin!

        .align  5
hrmG64SrchLp:
        mr      r6,r3                       ; r6 <- current mapping slot's flags
        lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
        mr      r7,r4                       ; r7 <- current mapping slot's space ID
        lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
        clrrdi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
        ld      r5,mpVAddr+GV_SLOT_SZ(r31)  ; r5 <- next mapping slot's virtual addr
        rlwinm  r11,r6,0,mpgFree            ; Isolate guest free mapping flag
        xor     r7,r7,r9                    ; Compare space ID
        or      r0,r11,r7                   ; r0 <- !(free && space match)
        xor     r8,r8,r30                   ; Compare virtual address
        or.     r0,r0,r8                    ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                 ; Join common path on hit (r31 points to guest mapping)

        addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
        bdnz    hrmG64SrchLp                ; Iterate

        mr      r6,r3                       ; r6 <- current mapping slot's flags
        clrrdi  r5,r5,12                    ; Remove flags from virtual address
        rlwinm  r11,r6,0,mpgFree            ; Isolate guest free mapping flag
        xor     r4,r4,r9                    ; Compare space ID
        or      r0,r11,r4                   ; r0 <- !(free && space match)
        xor     r5,r5,r30                   ; Compare virtual address
        or.     r0,r0,r5                    ; cr0_eq <- !free && space match && virtual addr match
        beq     hrmGSrchHit                 ; Join common path on hit (r31 points to guest mapping)
hrmGSrchMiss:
        lwz     r3,vxsGrmMiss(r26)          ; Get remove miss count
        li      r25,mapRtNotFnd             ; Return not found
        addi    r3,r3,1                     ; Increment miss count
        stw     r3,vxsGrmMiss(r26)          ; Update miss count
        b       hrmGReturn                  ; Join guest return

        .align  5
hrmGSrchHit:
        rlwinm. r0,r6,0,mpgDormant          ; Is this entry dormant?
        bne     hrmGDormant                 ; Yes, nothing to disconnect

        lwz     r3,vxsGrmActive(r26)        ; Get active hit count
        addi    r3,r3,1                     ; Increment active hit count
        stw     r3,vxsGrmActive(r26)        ; Update hit count

        bt++    pf64Bitb,hrmGDscon64        ; Handle 64-bit disconnect separately
        bl      mapInvPte32                 ; Disconnect PTE, invalidate, gather ref and change
                                            ;   r31 <- mapping's physical address
                                            ;   r3  -> PTE slot physical address
                                            ;   r4  -> High-order 32 bits of PTE
                                            ;   r5  -> Low-order 32 bits of PTE
                                            ;   r6  -> PCA
                                            ;   r7  -> PCA physical address
        rlwinm  r2,r3,29,29,31              ; Get PTE's slot number in the PTEG (8-byte PTEs)
        b       hrmGFreePTE                 ; Join 64-bit path to release the PTE
hrmGDscon64:
        bl      mapInvPte64                 ; Disconnect PTE, invalidate, gather ref and change
        rlwinm  r2,r3,28,29,31              ; Get PTE's slot number in the PTEG (16-byte PTEs)
hrmGFreePTE:
        mr.     r3,r3                       ; Was there a valid PTE?
        beq     hrmGDormant                 ; No valid PTE, we're almost done
        lis     r0,0x8000                   ; Prepare free bit for this slot
        srw     r0,r0,r2                    ; Position free bit
        or      r6,r6,r0                    ; Set it in our PCA image
        lwz     r8,mpPte(r31)               ; Get PTE offset
        rlwinm  r8,r8,0,~mpHValid           ; Make the offset invalid
        stw     r8,mpPte(r31)               ; Save invalidated PTE offset
        eieio                               ; Synchronize all previous updates (mapInvPtexx didn't)
        stw     r6,0(r7)                    ; Update PCA and unlock the PTEG

hrmGDormant:
        lwz     r3,mpPAddr(r31)             ; r3 <- physical 4K-page number
        bl      mapFindLockPN               ; Find 'n' lock this page's physent
        mr.     r29,r3                      ; Got lock on our physent?
        beq--   hrmGBadPLock                ; No, time to bail out

        crset   cr1_eq                      ; cr1_eq <- previous link is the anchor
        bt++    pf64Bitb,hrmGRemove64       ; Use 64-bit version on 64-bit machine
        la      r11,ppLink+4(r29)           ; Point to chain anchor
        lwz     r9,ppLink+4(r29)            ; Get chain anchor
        rlwinm. r9,r9,0,~ppFlags            ; Remove flags, yielding 32-bit physical chain pointer
hrmGRemLoop:
        beq-    hrmGPEMissMiss              ; End of chain, this is not good
        cmplw   r9,r31                      ; Is this the mapping to remove?
        lwz     r8,mpAlias+4(r9)            ; Get forward chain pointer
        bne     hrmGRemNext                 ; No, chain onward
        bt      cr1_eq,hrmGRemRetry         ; Mapping to remove is chained from anchor
        stw     r8,0(r11)                   ; Unchain gpv->phys mapping
        b       hrmGDelete                  ; Finish deleting mapping
hrmGRemRetry:
        lwarx   r0,0,r11                    ; Get previous link
        rlwimi  r0,r8,0,~ppFlags            ; Insert new forward pointer whilst preserving flags
        stwcx.  r0,0,r11                    ; Update previous link
        bne-    hrmGRemRetry                ; Lost reservation, retry
        b       hrmGDelete                  ; Finish deleting mapping

hrmGRemNext:
        la      r11,mpAlias+4(r9)           ; Point to (soon to be) previous link
        crclr   cr1_eq                      ; ~cr1_eq <- Previous link is not the anchor
        mr.     r9,r8                       ; Does next entry exist?
        b       hrmGRemLoop                 ; Carry on

hrmGRemove64:
        li      r7,ppLFAmask                ; Get mask to clean up mapping pointer
        rotrdi  r7,r7,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
        la      r11,ppLink(r29)             ; Point to chain anchor
        ld      r9,ppLink(r29)              ; Get chain anchor
        andc.   r9,r9,r7                    ; Remove flags, yielding 64-bit physical chain pointer
hrmGRem64Lp:
        beq--   hrmGPEMissMiss              ; End of chain, this is not good
        cmpld   r9,r31                      ; Is this the mapping to remove?
        ld      r8,mpAlias(r9)              ; Get forward chain pointer
        bne     hrmGRem64Nxt                ; No mapping to remove, chain on, dude
        bt      cr1_eq,hrmGRem64Rt          ; Mapping to remove is chained from anchor
        std     r8,0(r11)                   ; Unchain gpv->phys mapping
        b       hrmGDelete                  ; Finish deleting mapping
hrmGRem64Rt:
        ldarx   r0,0,r11                    ; Get previous link
        and     r0,r0,r7                    ; Get flags
        or      r0,r0,r8                    ; Insert new forward pointer
        stdcx.  r0,0,r11                    ; Slam it back in
        bne--   hrmGRem64Rt                 ; Lost reservation, retry
        b       hrmGDelete                  ; Finish deleting mapping

        .align  5
hrmGRem64Nxt:
        la      r11,mpAlias(r9)             ; Point to (soon to be) previous link
        crclr   cr1_eq                      ; ~cr1_eq <- Previous link is not the anchor
        mr.     r9,r8                       ; Does next entry exist?
        b       hrmGRem64Lp                 ; Carry on

hrmGDelete:
        mr      r3,r29                      ; r3 <- physent addr
        bl      mapPhysUnlock               ; Unlock physent chain
        lwz     r3,mpFlags(r31)             ; Get mapping's flags
        rlwinm  r3,r3,0,~mpgFlags           ; Clear all guest flags
        ori     r3,r3,mpgFree               ; Mark mapping free
        stw     r3,mpFlags(r31)             ; Update flags
        li      r25,mapRtGuest              ; Set return code to 'found guest mapping'

hrmGReturn:
        la      r3,pmapSXlk(r27)            ; r3 <- host pmap search lock phys addr
        bl      sxlkUnlock                  ; Release host pmap search lock

        mr      r3,r25                      ; r3 <- return code
        bt++    pf64Bitb,hrmGRtn64          ; Handle 64-bit separately
        mtmsr   r17                         ; Restore 'rupts, translation
        isync                               ; Throw a small wrench into the pipeline
        b       hrmRetnCmn                  ; Nothing to do now but pop a frame and return
hrmGRtn64: mtmsrd  r17                      ; Restore 'rupts, translation, 32-bit mode
        b       hrmRetnCmn                  ; Join common return

hrmGBadPLock:
hrmGPEMissMiss:
        lis     r0,hi16(Choke)              ; Seen the arrow on the doorpost
        ori     r0,r0,lo16(Choke)           ; Sayin' "THIS LAND IS CONDEMNED"
        li      r3,failMapping              ; All the way from New Orleans
        sc                                  ; To Jerusalem


/*
 *      mapping *hw_purge_phys(physent) - remove a mapping from the system
 *
 *      Upon entry, R3 contains a pointer to a physent.
 *
 *      This function removes the first mapping from a physical entry
 *      alias list.  It locks the list, extracts the vaddr and pmap from
 *      the first entry.  It then jumps into the hw_rem_map function.
 *      NOTE: since we jump into rem_map, we need to set up the stack
 *      identically.  Also, we set the next parm to 0 so we do not
 *      try to save a next vaddr.
 *
 *      We return the virtual address of the removed mapping in R3.
 *
 *      Note that this is designed to be called from 32-bit mode with a stack.
 *
 *      We disable translation and all interruptions here.  This keeps us
 *      from having to worry about a deadlock due to having anything locked
 *      and needing it to process a fault.
 *
 *      Note that this must be done with both interruptions off and VM off.
 *
 *      Remove mapping via physical page (mapping_purge):
 *
 *      1) lock physent
 *      2) extract vaddr and pmap
 *      3) unlock physent
 *      4) do "remove mapping via pmap"
 */
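/*
 *      Editor's hedged C-level sketch (an illustration, not part of the original
 *      source) of the four steps above; the type and helper names here are
 *      assumptions, not xnu's real definitions:
 *
 *          mapping_t *purge_phys_sketch(physent_t *pp) {
 *              mapPhysLock(pp);
 *              mapping_t *mp = strip_flags(pp->ppLink);    // head of alias chain
 *              if (mp == NULL) {                           // chain is empty
 *                  mapPhysUnlock(pp);
 *                  return (mapping_t *)mapRtEmpty;         // nothing to remove
 *              }
 *              pmap_t   pmap = pmapTrans[mp->mpSpace];     // space hash -> pmap
 *              addr64_t va   = mp->mpVAddr;
 *              mapPhysUnlock(pp);
 *              return hw_rem_map(pmap, va, NULL);          // next parm 0: no next vaddr
 *          }
 */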

        .align  5
        .globl  EXT(hw_purge_phys)

LEXT(hw_purge_phys)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
        mflr    r0                          ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)        ; Save a register
        stw     r16,FM_ARG0+0x04(r1)        ; Save a register
        stw     r17,FM_ARG0+0x08(r1)        ; Save a register
        stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
        stw     r19,FM_ARG0+0x10(r1)        ; Save a register
        stw     r20,FM_ARG0+0x14(r1)        ; Save a register
        stw     r21,FM_ARG0+0x18(r1)        ; Save a register
        stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
        stw     r23,FM_ARG0+0x20(r1)        ; Save a register
        stw     r24,FM_ARG0+0x24(r1)        ; Save a register
        stw     r25,FM_ARG0+0x28(r1)        ; Save a register
        li      r6,0                        ; Set no next address return
        stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
        stw     r27,FM_ARG0+0x30(r1)        ; Save a register
        stw     r28,FM_ARG0+0x34(r1)        ; Save a register
        stw     r29,FM_ARG0+0x38(r1)        ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
        stw     r31,FM_ARG0+0x40(r1)        ; Save a register
        stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

        bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

        bl      mapPhysLock                 ; Lock the physent

        bt++    pf64Bitb,hppSF              ; skip if 64-bit (only they take the hint)

        lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping
        li      r0,ppFlags                  ; Set the bottom stuff to clear
        b       hppJoin                     ; Join the common...

hppSF:  li      r0,ppLFAmask
        ld      r12,ppLink(r3)              ; Get the pointer to the first mapping
        rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F

hppJoin: andc.   r12,r12,r0                 ; Clean and test link
        beq--   hppNone                     ; There are no more mappings on physical page

        lis     r28,hi16(EXT(pmapTrans))    ; Get the top of the start of the pmap hash to pmap translate table
        lhz     r7,mpSpace(r12)             ; Get the address space hash
        ori     r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
        slwi    r0,r7,2                     ; Multiply space by 4
        lwz     r4,mpVAddr(r12)             ; Get the top of the vaddr
        slwi    r7,r7,3                     ; Multiply space by 8
        lwz     r5,mpVAddr+4(r12)           ; and the bottom
        add     r7,r7,r0                    ; Get correct displacement into translate table
        lwz     r28,0(r28)                  ; Get the actual translation map

        add     r28,r28,r7                  ; Point to the pmap translation

        bl      mapPhysUnlock               ; Time to unlock the physical entry

        bt++    pf64Bitb,hppSF2             ; skip if 64-bit (only they take the hint)

        lwz     r28,pmapPAddr+4(r28)        ; Get the physical address of the pmap
        b       hrmJoin                     ; Go remove the mapping...

hppSF2: ld      r28,pmapPAddr(r28)          ; Get the physical address of the pmap
        b       hrmJoin                     ; Go remove the mapping...

        .align  5

hppNone: bl      mapPhysUnlock              ; Time to unlock the physical entry

        bt++    pf64Bitb,hppSF3             ; skip if 64-bit (only they take the hint)...

        mtmsr   r11                         ; Restore enables/translation/etc.
        isync
        b       hppRetnCmn                  ; Join the common return code...

hppSF3: mtmsrd  r11                         ; Restore enables/translation/etc.
        isync

;
;       NOTE: we have not used any registers other than the volatiles to this point
;

hppRetnCmn: lwz     r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return

        li      r3,mapRtEmpty               ; Physent chain is empty
        mtlr    r12                         ; Restore the return
        lwz     r1,0(r1)                    ; Pop the stack
        blr                                 ; Leave...

/*
 *      mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *      Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *      a 64-bit quantity, it is a long long so it is in R4 and R5.
 *
 *      We return the virtual address of the removed mapping in R3.
 *
 *      Note that this is designed to be called from 32-bit mode with a stack.
 *
 *      We disable translation and all interruptions here.  This keeps us
 *      from having to worry about a deadlock due to having anything locked
 *      and needing it to process a fault.
 *
 *      Note that this must be done with both interruptions off and VM off.
 *
 *      Remove a mapping which can be reestablished by VM.
 */
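/*
 *      Editor's hedged C-level sketch (an illustration, not part of the original
 *      source) of the scan below: walk the pmap's mapping list from vaddr,
 *      skipping mappings that are busy, permanent, or of a special type.
 *      Helper names are assumptions:
 *
 *          mapping_t *purge_map_sketch(pmap_t pmap, addr64_t va, addr64_t *next) {
 *              for (;;) {
 *                  mapping_t *mp = mapSearchFull(pmap, &va);   // va advances to next entry
 *                  if (mp != NULL && busy_count(mp) == 0 &&    // nobody is using it
 *                      is_normal_type(mp) && !is_permanent(mp)) {
 *                      *next = va;                             // report next vaddr
 *                      return mp;                              // caller joins the remove path
 *                  }
 *                  if (va == 0)                                // no more entries in pmap
 *                      return NULL;
 *              }
 *          }
 */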

        .align  5
        .globl  EXT(hw_purge_map)

LEXT(hw_purge_map)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
        mflr    r0                          ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)        ; Save a register
        stw     r16,FM_ARG0+0x04(r1)        ; Save a register
        stw     r17,FM_ARG0+0x08(r1)        ; Save a register
        stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
        stw     r19,FM_ARG0+0x10(r1)        ; Save a register
        mfsprg  r19,2                       ; Get feature flags
        stw     r20,FM_ARG0+0x14(r1)        ; Save a register
        stw     r21,FM_ARG0+0x18(r1)        ; Save a register
        mtcrf   0x02,r19                    ; move pf64Bit cr6
        stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
        stw     r23,FM_ARG0+0x20(r1)        ; Save a register
        stw     r24,FM_ARG0+0x24(r1)        ; Save a register
        stw     r25,FM_ARG0+0x28(r1)        ; Save a register
        stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
        stw     r27,FM_ARG0+0x30(r1)        ; Save a register
        stw     r28,FM_ARG0+0x34(r1)        ; Save a register
        stw     r29,FM_ARG0+0x38(r1)        ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
        stw     r31,FM_ARG0+0x40(r1)        ; Save a register
        stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

#if DEBUG
        lwz     r11,pmapFlags(r3)           ; Get pmaps flags
        rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
        bne     hpmPanic                    ; Call not valid for guest shadow assist pmap
#endif

        bt++    pf64Bitb,hpmSF1             ; skip if 64-bit (only they take the hint)
        lwz     r9,pmapvr+4(r3)             ; Get conversion mask
        b       hpmSF1x                     ; Done...

hpmSF1: ld      r9,pmapvr(r3)               ; Get conversion mask

hpmSF1x:
        bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

        xor     r28,r3,r9                   ; Convert the pmap to physical addressing

        mr      r17,r11                     ; Save the MSR

        la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
        bl      sxlkExclusive               ; Go get an exclusive lock on the mapping lists
        mr.     r3,r3                       ; Did we get the lock?
        bne--   hrmBadLock                  ; Nope...
;
;       Note that we do a full search (i.e., no shortcut level skips, etc.)
;       here so that we know the previous elements and can dequeue them later.
;
hpmSearch:
        mr      r3,r28                      ; Pass in pmap to search
        mr      r29,r4                      ; Top half of vaddr
        mr      r30,r5                      ; Bottom half of vaddr
        bl      EXT(mapSearchFull)          ; Rescan the list
        mr.     r31,r3                      ; Did we? (And remember mapping address for later)
        or      r0,r4,r5                    ; Are we beyond the end?
        mr      r15,r4                      ; Save top of next vaddr
        cmplwi  cr1,r0,0                    ; See if there is another
        mr      r16,r5                      ; Save bottom of next vaddr
        bne--   hpmGotOne                   ; We found one, go check it out...

hpmCNext: bne++   cr1,hpmSearch             ; There is another to check...
        b       hrmNotFound                 ; No more in pmap to check...

hpmGotOne: lwz     r20,mpFlags(r3)          ; Get the flags
        andi.   r0,r20,lo16(mpType|mpPerm)  ; cr0_eq <- normal mapping && !permanent
        rlwinm  r21,r20,8,24,31             ; Extract the busy count
        cmplwi  cr2,r21,0                   ; Is it busy?
        crand   cr0_eq,cr2_eq,cr0_eq        ; not busy and can be removed?
        beq++   hrmGotX                     ; Found, branch to remove the mapping...
        b       hpmCNext                    ; Nope...

hpmPanic: lis     r0,hi16(Choke)            ; System abend
        ori     r0,r0,lo16(Choke)           ; System abend
        li      r3,failMapping              ; Show that we failed some kind of mapping thing
        sc

/*
 *      mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
 *
 *      Upon entry, R3 contains a pointer to the physent and R4 contains a
 *      pointer to the pmap.
 *
 *      This function removes the first mapping for a specific pmap from a physical entry
 *      alias list.  It locks the list, extracts the vaddr and pmap from
 *      the first appropriate entry.  It then jumps into the hw_rem_map function.
 *      NOTE: since we jump into rem_map, we need to set up the stack
 *      identically.  Also, we set the next parm to 0 so we do not
 *      try to save a next vaddr.
 *
 *      We return the virtual address of the removed mapping in R3.
 *
 *      Note that this is designed to be called from 32-bit mode with a stack.
 *
 *      We disable translation and all interruptions here.  This keeps us
 *      from having to worry about a deadlock due to having anything locked
 *      and needing it to process a fault.
 *
 *      Note that this must be done with both interruptions off and VM off.
 *
 *      Remove mapping via physical page (mapping_purge):
 *
 *      1) lock physent
 *      2) extract vaddr and pmap
 *      3) unlock physent
 *      4) do "remove mapping via pmap"
 */
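/*
 *      Editor's hedged C-level sketch (an illustration, not part of the original
 *      source) of the chain walk below; field and helper names are assumptions:
 *
 *          mapping_t *purge_space_sketch(physent_t *pp, pmap_t pmap) {
 *              mapPhysLock(pp);
 *              for (mapping_t *mp = strip_flags(pp->ppLink); mp != NULL;
 *                   mp = strip_flags(mp->mpAlias)) {       // follow the alias chain
 *                  if (mp->mpSpace == pmap->pmapSpace) {   // mapping belongs to this space
 *                      addr64_t va = mp->mpVAddr;
 *                      mapPhysUnlock(pp);
 *                      return hw_rem_map(pmap, va, NULL);  // join the common remove path
 *                  }
 *              }
 *              mapPhysUnlock(pp);
 *              return (mapping_t *)mapRtEmpty;             // no mapping for this space
 *          }
 */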

        .align  5
        .globl  EXT(hw_purge_space)

LEXT(hw_purge_space)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
        mflr    r0                          ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)        ; Save a register
        stw     r16,FM_ARG0+0x04(r1)        ; Save a register
        stw     r17,FM_ARG0+0x08(r1)        ; Save a register
        mfsprg  r2,2                        ; Get feature flags
        stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
        stw     r19,FM_ARG0+0x10(r1)        ; Save a register
        stw     r20,FM_ARG0+0x14(r1)        ; Save a register
        stw     r21,FM_ARG0+0x18(r1)        ; Save a register
        stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
        mtcrf   0x02,r2                     ; move pf64Bit cr6
        stw     r23,FM_ARG0+0x20(r1)        ; Save a register
        stw     r24,FM_ARG0+0x24(r1)        ; Save a register
        stw     r25,FM_ARG0+0x28(r1)        ; Save a register
        stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
        stw     r27,FM_ARG0+0x30(r1)        ; Save a register
        li      r6,0                        ; Set no next address return
        stw     r28,FM_ARG0+0x34(r1)        ; Save a register
        stw     r29,FM_ARG0+0x38(r1)        ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
        stw     r31,FM_ARG0+0x40(r1)        ; Save a register
        stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

#if DEBUG
        lwz     r11,pmapFlags(r4)           ; Get pmaps flags
        rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
        bne     hpsPanic                    ; Call not valid for guest shadow assist pmap
#endif

        bt++    pf64Bitb,hpsSF1             ; skip if 64-bit (only they take the hint)

        lwz     r9,pmapvr+4(r4)             ; Get conversion mask for pmap

        b       hpsSF1x                     ; Done...

hpsSF1: ld      r9,pmapvr(r4)               ; Get conversion mask for pmap

hpsSF1x: bl      EXT(mapSetUp)              ; Turn off interrupts, translation, and possibly enter 64-bit

        xor     r4,r4,r9                    ; Convert the pmap to physical addressing

        bl      mapPhysLock                 ; Lock the physent

        lwz     r8,pmapSpace(r4)            ; Get the space hash

        bt++    pf64Bitb,hpsSF              ; skip if 64-bit (only they take the hint)

        lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping

hpsSrc32: rlwinm. r12,r12,0,~ppFlags        ; Clean and test mapping address
        beq     hpsNone                     ; Did not find one...

        lhz     r10,mpSpace(r12)            ; Get the space

        cmplw   r10,r8                      ; Is this one of ours?
        beq     hpsFnd                      ; Yes...

        lwz     r12,mpAlias+4(r12)          ; Chain on to the next
        b       hpsSrc32                    ; Check it out...

        .align  5

hpsSF:  li      r0,ppLFAmask
        ld      r12,ppLink(r3)              ; Get the pointer to the first mapping
        rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F

hpsSrc64: andc.   r12,r12,r0                ; Clean and test mapping address
        beq     hpsNone                     ; Did not find one...

        lhz     r10,mpSpace(r12)            ; Get the space

        cmplw   r10,r8                      ; Is this one of ours?
        beq     hpsFnd                      ; Yes...

        ld      r12,mpAlias(r12)            ; Chain on to the next
        b       hpsSrc64                    ; Check it out...

        .align  5

hpsFnd: mr      r28,r4                      ; Set the pmap physical address
        lwz     r4,mpVAddr(r12)             ; Get the top of the vaddr
        lwz     r5,mpVAddr+4(r12)           ; and the bottom

        bl      mapPhysUnlock               ; Time to unlock the physical entry
        b       hrmJoin                     ; Go remove the mapping...

        .align  5

hpsNone: bl      mapPhysUnlock              ; Time to unlock the physical entry

        bt++    pf64Bitb,hpsSF3             ; skip if 64-bit (only they take the hint)...

        mtmsr   r11                         ; Restore enables/translation/etc.
        isync
        b       hpsRetnCmn                  ; Join the common return code...

hpsSF3: mtmsrd  r11                         ; Restore enables/translation/etc.
        isync

;
;       NOTE: we have not used any registers other than the volatiles to this point
;

hpsRetnCmn: lwz     r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return

        li      r3,mapRtEmpty               ; No mappings for specified pmap on physent chain
        mtlr    r12                         ; Restore the return
        lwz     r1,0(r1)                    ; Pop the stack
        blr                                 ; Leave...

hpsPanic: lis     r0,hi16(Choke)            ; System abend
        ori     r0,r0,lo16(Choke)           ; System abend
        li      r3,failMapping              ; Show that we failed some kind of mapping thing
        sc

/*
 *      mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
 *                                               on this physent chain
 *
 *      Locates the first guest mapping on the physent chain that is associated with the
 *      specified host pmap.  If this succeeds, the mapping is removed by joining the general
 *      remove path; otherwise, we return NULL.  The caller is expected to invoke this entry
 *      repeatedly until no additional guest mappings that match our criteria are removed.
 *
 *      Because this entry point exits through hw_rem_map, our prolog pushes its frame.
 *
 *      Parameters:
 *              r3 : physent, 32-bit kernel virtual address
 *              r4 : host pmap, 32-bit kernel virtual address
 *
 *      Volatile register usage (for linkage through hrmJoin):
 *              r4 : high-order 32 bits of guest virtual address
 *              r5 : low-order 32 bits of guest virtual address
 *              r11: saved MSR image
 *
 *      Non-volatile register usage:
 *              r26: VMM extension block's physical address
 *              r27: host pmap's physical address
 *              r28: guest pmap's physical address
 */

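/*
 *      Editor's hedged C-level sketch (an illustration, not part of the original
 *      source) of the match test applied to each mapping on the alias chain
 *      below; helper names are assumptions:
 *
 *          bool scrub_match_sketch(mapping_t *mp, vmm_ext_t *host_ext) {
 *              pmap_t guest = pmapTrans[mp->mpSpace];      // space hash -> guest pmap
 *              return mapping_type(mp) == mpGuest &&       // it is a guest mapping
 *                     guest->pmapVmmExtPhys == host_ext;   // guest belongs to this host
 *          }
 *
 *      The first mapping for which this is true is handed to the common remove path.
 */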
        .align  5
        .globl  EXT(hw_scrub_guest)

LEXT(hw_scrub_guest)
        stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
        mflr    r0                          ; Save the link register
        stw     r15,FM_ARG0+0x00(r1)        ; Save a register
        stw     r16,FM_ARG0+0x04(r1)        ; Save a register
        stw     r17,FM_ARG0+0x08(r1)        ; Save a register
        mfsprg  r2,2                        ; Get feature flags
        stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
        stw     r19,FM_ARG0+0x10(r1)        ; Save a register
        stw     r20,FM_ARG0+0x14(r1)        ; Save a register
        stw     r21,FM_ARG0+0x18(r1)        ; Save a register
        stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
        mtcrf   0x02,r2                     ; move pf64Bit cr6
        stw     r23,FM_ARG0+0x20(r1)        ; Save a register
        stw     r24,FM_ARG0+0x24(r1)        ; Save a register
        stw     r25,FM_ARG0+0x28(r1)        ; Save a register
        stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
        stw     r27,FM_ARG0+0x30(r1)        ; Save a register
        li      r6,0                        ; Set no next address return
        stw     r28,FM_ARG0+0x34(r1)        ; Save a register
        stw     r29,FM_ARG0+0x38(r1)        ; Save a register
        stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
        stw     r31,FM_ARG0+0x40(r1)        ; Save a register
        stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
        stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

        lwz     r11,pmapVmmExt(r4)          ; get VMM pmap extension block vaddr

        bt++    pf64Bitb,hsg64Salt          ; Test for 64-bit machine
        lwz     r26,pmapVmmExtPhys+4(r4)    ; Get VMM pmap extension block paddr
        lwz     r9,pmapvr+4(r4)             ; Get 32-bit virt<->real conversion salt
        b       hsgStart                    ; Get to work

hsg64Salt: ld      r26,pmapVmmExtPhys(r4)   ; Get VMM pmap extension block paddr
        ld      r9,pmapvr+4(r4)             ; Get 64-bit virt<->real conversion salt

hsgStart: bl      EXT(mapSetUp)             ; Disable 'rupts, translation, enter 64-bit mode
        xor     r27,r4,r9                   ; Convert host pmap_t virt->real
        bl      mapPhysLock                 ; Lock the physent

        bt++    pf64Bitb,hsg64Scan          ; Test for 64-bit machine

        lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping
hsg32Loop: rlwinm. r12,r12,0,~ppFlags       ; Clean and test mapping address
        beq     hsg32Miss                   ; Did not find one...
        lwz     r8,mpFlags(r12)             ; Get mapping's flags
        lhz     r7,mpSpace(r12)             ; Get mapping's space id
        rlwinm  r8,r8,0,mpType              ; Extract mapping's type code
        lis     r28,hi16(EXT(pmapTrans))    ; Get the top of the start of the pmap hash to pmap translate table
        xori    r8,r8,mpGuest               ; Is it a guest mapping?
        ori     r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
        slwi    r9,r7,2                     ; Multiply space by 4
        lwz     r28,0(r28)                  ; Get the actual translation map
        lwz     r4,mpVAddr(r12)             ; Get the top of the vaddr
        slwi    r7,r7,3                     ; Multiply space by 8
        lwz     r5,mpVAddr+4(r12)           ; Get the bottom of the vaddr
        add     r7,r7,r9                    ; Get correct displacement into translate table
        add     r28,r28,r7                  ; Point to the pmap translation
        lwz     r28,pmapPAddr+4(r28)        ; Get guest pmap paddr
        lwz     r7,pmapVmmExtPhys+4(r28)    ; Get VMM extension block paddr
        xor     r7,r7,r26                   ; Is guest associated with specified host?
        or.     r7,r7,r8                    ; Guest mapping && associated with host?
        lwz     r12,mpAlias+4(r12)          ; Chain on to the next
        bne     hsg32Loop                   ; Try next mapping on alias chain

hsg32Hit: bl      mapPhysUnlock             ; Unlock physent chain
        b       hrmJoin                     ; Join common path for mapping removal

        .align  5
hsg32Miss: bl      mapPhysUnlock            ; Unlock physent chain
        mtmsr   r11                         ; Restore 'rupts, translation
        isync                               ; Throw a small wrench into the pipeline
        li      r3,mapRtEmpty               ; No mappings found matching specified criteria
        b       hrmRetnCmn                  ; Exit through common epilog

        .align  5
hsg64Scan: li      r6,ppLFAmask             ; Get lock, flag, attribute mask seed
        ld      r12,ppLink(r3)              ; Grab the pointer to the first mapping
        rotrdi  r6,r6,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
hsg64Loop: andc.   r12,r12,r6               ; Clean and test mapping address
        beq     hsg64Miss                   ; Did not find one...
        lwz     r8,mpFlags(r12)             ; Get mapping's flags
        lhz     r7,mpSpace(r12)             ; Get mapping's space id
        rlwinm  r8,r8,0,mpType              ; Extract mapping's type code
        lis     r28,hi16(EXT(pmapTrans))    ; Get the top of the start of the pmap hash to pmap translate table
        xori    r8,r8,mpGuest               ; Is it a guest mapping?
        ori     r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
        slwi    r9,r7,2                     ; Multiply space by 4
        lwz     r28,0(r28)                  ; Get the actual translation map
        lwz     r4,mpVAddr(r12)             ; Get the top of the vaddr
        slwi    r7,r7,3                     ; Multiply space by 8
        lwz     r5,mpVAddr+4(r12)           ; Get the bottom of the vaddr
        add     r7,r7,r9                    ; Get correct displacement into translate table
        add     r28,r28,r7                  ; Point to the pmap translation
        ld      r28,pmapPAddr(r28)          ; Get guest pmap paddr
        ld      r7,pmapVmmExtPhys(r28)      ; Get VMM extension block paddr
        xor     r7,r7,r26                   ; Is guest associated with specified host?
        or.     r7,r7,r8                    ; Guest mapping && associated with host?
        ld      r12,mpAlias(r12)            ; Chain on to the next
        bne     hsg64Loop                   ; Try next mapping on alias chain

hsg64Hit: bl      mapPhysUnlock             ; Unlock physent chain
        b       hrmJoin                     ; Join common path for mapping removal

        .align  5
hsg64Miss: bl      mapPhysUnlock            ; Unlock physent chain
        mtmsrd  r11                         ; Restore 'rupts, translation
        li      r3,mapRtEmpty               ; No mappings found matching specified criteria
        b       hrmRetnCmn                  ; Exit through common epilog


/*
 *      mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
 *
 *      Upon entry, R3 contains a pointer to a physent and R4 contains the
 *      space ID from the pmap in question.
 *
 *      We return the virtual address of the found mapping in R3.  Note that
 *      the mapping's busy count is bumped.
 *
 *      Note that this is designed to be called from 32-bit mode with a stack.
 *
 *      We disable translation and all interruptions here.  This keeps us
 *      from having to worry about a deadlock due to having anything locked
 *      and needing it to process a fault.
 */
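/*
 *      Editor's hedged C-level sketch (an illustration, not part of the original
 *      source) of the lookup below, including the mbvrswap trick that converts
 *      the mapping's physical address back to virtual by XORing with a per-page
 *      swap value; helper names are assumptions:
 *
 *          mapping_t *find_space_sketch(physent_t *pp, uint32_t space) {
 *              mapPhysLock(pp);
 *              mapping_t *mp;
 *              for (mp = strip_flags(pp->ppLink); mp != NULL;
 *                   mp = strip_flags(mp->mpAlias))
 *                  if (mp->mpSpace == space)
 *                      break;                              // first mapping for this space
 *              if (mp != NULL)
 *                  mapBumpBusy(mp);                        // hold it across the unlock
 *              mapPhysUnlock(pp);
 *              if (mp == NULL)
 *                  return NULL;
 *              uintptr_t page = (uintptr_t)mp & ~0xFFFul;  // start of mapping page
 *              return (mapping_t *)((uintptr_t)mp ^ mbvrswap_of(page)); // phys -> virt
 *          }
 */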

        .align  5
        .globl  EXT(hw_find_space)

LEXT(hw_find_space)
        stwu    r1,-(FM_SIZE)(r1)           ; Make some space on the stack
        mflr    r0                          ; Save the link register
        mr      r8,r4                       ; Remember the space
        stw     r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

        bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

        bl      mapPhysLock                 ; Lock the physent

        bt++    pf64Bitb,hfsSF              ; skip if 64-bit (only they take the hint)

        lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping

hfsSrc32: rlwinm. r12,r12,0,~ppFlags        ; Clean and test mapping address
        beq     hfsNone                     ; Did not find one...

        lhz     r10,mpSpace(r12)            ; Get the space

        cmplw   r10,r8                      ; Is this one of ours?
        beq     hfsFnd                      ; Yes...

        lwz     r12,mpAlias+4(r12)          ; Chain on to the next
        b       hfsSrc32                    ; Check it out...

        .align  5

hfsSF:  li      r0,ppLFAmask
        ld      r12,ppLink(r3)              ; Get the pointer to the first mapping
        rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F

hfsSrc64: andc.   r12,r12,r0                ; Clean and test mapping address
        beq     hfsNone                     ; Did not find one...

        lhz     r10,mpSpace(r12)            ; Get the space

        cmplw   r10,r8                      ; Is this one of ours?
        beq     hfsFnd                      ; Yes...

        ld      r12,mpAlias(r12)            ; Chain on to the next
        b       hfsSrc64                    ; Check it out...

        .align  5

hfsFnd: mr      r8,r3                       ; Save the physent
        mr      r3,r12                      ; Point to the mapping
        bl      mapBumpBusy                 ; If we found it, bump up the busy count so the mapping does not disappear

        mr      r3,r8                       ; Get back the physical entry
        li      r7,0xFFF                    ; Get a page size mask
        bl      mapPhysUnlock               ; Time to unlock the physical entry

        andc    r3,r12,r7                   ; Move the mapping back down to a page
        lwz     r3,mbvrswap+4(r3)           ; Get last half of virtual to real swap
        xor     r12,r3,r12                  ; Convert to virtual
        b       hfsRet                      ; Time to return

        .align  5

hfsNone: bl      mapPhysUnlock              ; Time to unlock the physical entry

hfsRet: bt++    pf64Bitb,hfsSF3             ; skip if 64-bit (only they take the hint)...

        mtmsr   r11                         ; Restore enables/translation/etc.
        isync
        b       hfsRetnCmn                  ; Join the common return code...

hfsSF3: mtmsrd  r11                         ; Restore enables/translation/etc.
        isync

;
;       NOTE: we have not used any registers other than the volatiles to this point
;

hfsRetnCmn: mr      r3,r12                  ; Get the mapping or a 0 if we failed

#if DEBUG
        mr.     r3,r3                       ; Anything to return?
        beq     hfsRetnNull                 ; Nope
        lwz     r11,mpFlags(r3)             ; Get mapping flags
        rlwinm  r0,r11,0,mpType             ; Isolate the mapping type
        cmplwi  r0,mpGuest                  ; Shadow guest mapping?
        beq     hfsPanic                    ; Yup, kick the bucket
hfsRetnNull:
#endif

        lwz     r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return

        mtlr    r12                         ; Restore the return
        lwz     r1,0(r1)                    ; Pop the stack
        blr                                 ; Leave...

hfsPanic: lis     r0,hi16(Choke)            ; System abend
        ori     r0,r0,lo16(Choke)           ; System abend
        li      r3,failMapping              ; Show that we failed some kind of mapping thing
        sc

;
;       mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap.
;       Returns 0 if not found, or the virtual address of the mapping if
;       it is.  Also, the mapping has the busy count bumped.
;
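;
;       Editor's hedged C-level sketch (an illustration, not part of the original
;       source) of the contract below; helper names are assumptions:
;
;           mapping_t *find_map_sketch(pmap_t pmap, addr64_t va, addr64_t *nextva) {
;               sxlkShared(&pmap->pmapSXlk);                 // shared search lock
;               addr64_t next = va;
;               mapping_t *mp = mapSearch(pmap, &next);      // also yields next vaddr
;               if (mp != NULL && !(mp->mpFlags & mpRIP)) {  // found, not being removed
;                   mapBumpBusy(mp);                         // pin it for the caller
;                   mp = phys_to_virt(mp);                   // via the mbvrswap value
;               } else
;                   mp = NULL;
;               sxlkUnlock(&pmap->pmapSXlk);
;               *nextva = next;                              // always report the next vaddr
;               return mp;
;           }
;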
2351 | .align 5 | |
2352 | .globl EXT(hw_find_map) | |
1c79356b | 2353 | |
55e303ae A |
2354 | LEXT(hw_find_map) |
2355 | stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack | |
2356 | mflr r0 ; Save the link register | |
2357 | stw r25,FM_ARG0+0x00(r1) ; Save a register | |
2358 | stw r26,FM_ARG0+0x04(r1) ; Save a register | |
2359 | mr r25,r6 ; Remember address of next va | |
2360 | stw r27,FM_ARG0+0x08(r1) ; Save a register | |
2361 | stw r28,FM_ARG0+0x0C(r1) ; Save a register | |
2362 | stw r29,FM_ARG0+0x10(r1) ; Save a register | |
2363 | stw r30,FM_ARG0+0x14(r1) ; Save a register | |
2364 | stw r31,FM_ARG0+0x18(r1) ; Save a register | |
2365 | stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
1c79356b | 2366 | |
91447636 A |
2367 | #if DEBUG |
2368 | lwz r11,pmapFlags(r3) ; Get pmaps flags | |
2369 | rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active? | |
2370 | bne hfmPanic ; Call not valid for guest shadow assist pmap | |
2371 | #endif | |
2372 | ||
55e303ae A |
2373 | lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap |
2374 | lwz r7,pmapvr+4(r3) ; Get the second part | |
1c79356b | 2375 | |
1c79356b | 2376 | |
55e303ae A |
2377 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit |
2378 | ||
2379 | mr r27,r11 ; Remember the old MSR | |
2380 | mr r26,r12 ; Remember the feature bits | |
9bccf70c | 2381 | |
55e303ae | 2382 | xor r28,r3,r7 ; Change the common 32- and 64-bit half |
9bccf70c | 2383 | |
55e303ae | 2384 | bf-- pf64Bitb,hfmSF1 ; skip if 32-bit... |
1c79356b | 2385 | |
55e303ae | 2386 | rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top |
1c79356b | 2387 | |
55e303ae A |
2388 | hfmSF1: mr r29,r4 ; Save top half of vaddr |
2389 | mr r30,r5 ; Save the bottom half | |
2390 | ||
2391 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
2392 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
2393 | mr. r3,r3 ; Did we get the lock? | |
2394 | bne-- hfmBadLock ; Nope... | |
1c79356b | 2395 | |
55e303ae A |
2396 | mr r3,r28 ; get the pmap address |
2397 | mr r4,r29 ; Get bits 0:31 to look for | |
2398 | mr r5,r30 ; Get bits 32:64 | |
2399 | ||
2400 | bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags) | |
1c79356b | 2401 | |
55e303ae A |
2402 | rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit |
2403 | mr. r31,r3 ; Save the mapping if we found it | |
2404 | cmplwi cr1,r0,0 ; Are we removing? | |
2405 | mr r29,r4 ; Save next va high half | |
2406 | crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing | |
2407 | mr r30,r5 ; Save next va low half | |
2408 | li r6,0 ; Assume we did not find it | |
2409 | li r26,0xFFF ; Get a mask to relocate to start of mapping page | |
1c79356b | 2410 | |
55e303ae | 2411 | bt-- cr0_eq,hfmNotFnd ; We did not find it... |
1c79356b | 2412 | |
55e303ae | 2413 | bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear |
1c79356b | 2414 | |
55e303ae | 2415 | andc r4,r31,r26 ; Get back to the mapping page start |
1c79356b | 2416 | |
55e303ae A |
2417 | ; Note: we can treat 32- and 64-bit the same here. Because we are going from |
2418 | ; physical to virtual and we only do 32-bit virtual, we only need the low order | |
2419 | ; word of the xor. | |
d7e50217 | 2420 | |
55e303ae A |
2421 | lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap |
2422 | li r6,-1 ; Indicate we found it and it is not being removed | |
2423 | xor r31,r31,r4 ; Flip to virtual | |
d7e50217 | 2424 | |
55e303ae A |
2425 | hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
2426 | bl sxlkUnlock ; Unlock the search list | |
d7e50217 | 2427 | |
55e303ae A |
2428 | rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit |
2429 | and r3,r3,r6 ; Clear if not found or removing | |
de355530 | 2430 | |
55e303ae | 2431 | hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes... |
de355530 | 2432 | |
55e303ae A |
2433 | mtmsr r27 ; Restore enables/translation/etc. |
2434 | isync | |
2435 | b hfmReturnC ; Join common... | |
2436 | ||
2437 | hfmR64: mtmsrd r27 ; Restore enables/translation/etc. | |
2438 | isync | |
2439 | ||
2440 | hfmReturnC: stw r29,0(r25) ; Save the top of the next va | |
2441 | stw r30,4(r25) ; Save the bottom of the next va | |
2442 | lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return | |
2443 | lwz r25,FM_ARG0+0x00(r1) ; Restore a register | |
2444 | lwz r26,FM_ARG0+0x04(r1) ; Restore a register | |
2445 | and r3,r3,r6 ; Clear return if the mapping is being removed | |
2446 | lwz r27,FM_ARG0+0x08(r1) ; Restore a register | |
2447 | mtlr r0 ; Restore the return | |
2448 | lwz r28,FM_ARG0+0x0C(r1) ; Restore a register | |
2449 | lwz r29,FM_ARG0+0x10(r1) ; Restore a register | |
2450 | lwz r30,FM_ARG0+0x14(r1) ; Restore a register | |
2451 | lwz r31,FM_ARG0+0x18(r1) ; Restore a register | |
2452 | lwz r1,0(r1) ; Pop the stack | |
2453 | blr ; Leave... | |
2454 | ||
2455 | .align 5 | |
2456 | ||
2457 | hfmBadLock: li r3,1 ; Set lock time out error code | |
2458 | b hfmReturn ; Leave.... | |
1c79356b | 2459 | |
91447636 A |
2460 | hfmPanic: lis r0,hi16(Choke) ; System abend |
2461 | ori r0,r0,lo16(Choke) ; System abend | |
2462 | li r3,failMapping ; Show that we failed some kind of mapping thing | |
2463 | sc | |
2464 | ||
2465 | ||
2466 | /* | |
2467 | * void hw_clear_maps(void) | |
2468 | * | |
2469 | * Remove all mappings for all phys entries. | |
2470 | * | |
2471 | * | |
2472 | */ | |
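/*
 * Editorial sketch (not part of the original source): a C rendering of the walk
 * below, assuming the layouts this file already uses -- each entry of
 * pmap_mem_regions carries a physent table (mrPhysTab) covering pages
 * mrStart..mrEnd, each physent anchors a chain of mappings linked through
 * mpAlias, and clearing mpHValid in mpPte detaches a mapping from its PTE.
 * first_mapping() and next_mapping() are hypothetical helpers standing in for
 * the ppLink/mpAlias pointer cleanup done inline in the assembly.
 *
 *	for (rgn = pmap_mem_regions; rgn->mrPhysTab != 0; rgn++) {
 *		pp = rgn->mrPhysTab;
 *		for (n = rgn->mrEnd - rgn->mrStart + 1; n > 0; n--, pp++)
 *			for (mp = first_mapping(pp); mp; mp = next_mapping(mp))
 *				mp->mpPte &= ~mpHValid;	// kill the quick PTE pointer
 *	}
 */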
2473 | ||
2474 | .align 5 | |
2475 | .globl EXT(hw_clear_maps) | |
2476 | ||
2477 | LEXT(hw_clear_maps) | |
2478 | mflr r10 ; Save the link register | |
2479 | mfcr r9 ; Save the condition register | |
2480 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit | |
2481 | ||
2482 | lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
2483 | ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
2484 | ||
2485 | hcmNextRegion: | |
2486 | lwz r3,mrPhysTab(r5) ; Get the actual table address | |
2487 | lwz r0,mrStart(r5) ; Get start of table entry | |
2488 | lwz r4,mrEnd(r5) ; Get end of table entry | |
2489 | addi r5,r5,mrSize ; Point to the next region | |
2490 | ||
2491 | cmplwi r3,0 ; No more regions? | |
2492 | beq-- hcmDone ; Leave... | |
2493 | ||
2494 | sub r4,r4,r0 ; Calculate physical entry count | |
2495 | addi r4,r4,1 ; Entry count is end - start + 1 | |
2496 | mtctr r4 ; Set up the loop count | |
2497 | ||
2498 | bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version | |
2499 | ||
2500 | ||
2501 | hcmNextPhys32: | |
2502 | lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping | |
2503 | addi r3,r3,physEntrySize ; Next phys_entry | |
2504 | ||
2505 | hcmNextMap32: | |
3a60a9f5 | 2506 | rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address |
91447636 A |
2507 | beq hcmNoMap32 ; Did not find one... |
2508 | ||
2509 | lwz r0,mpPte(r4) ; Grab the offset to the PTE | |
2510 | rlwinm r0,r0,0,~mpHValid ; Clear out valid bit | |
2511 | stw r0,mpPte(r4) ; Store the invalidated pointer back | |
2512 | ||
2513 | lwz r4,mpAlias+4(r4) ; Chain on to the next | |
2514 | b hcmNextMap32 ; Check it out... | |
2515 | hcmNoMap32: | |
2516 | bdnz hcmNextPhys32 ; Do the next physent in this region... | |
2517 | b hcmNextRegion ; Then move on to the next region... | |
2518 | ||
2519 | ||
2520 | .align 5 | |
2521 | hcmNextPhys64: | |
2522 | li r0,ppLFAmask ; Get mask to clean up mapping pointer | |
2523 | ld r4,ppLink(r3) ; Get the pointer to the first mapping | |
2524 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
2525 | addi r3,r3,physEntrySize ; Next phys_entry | |
2526 | ||
2527 | hcmNextMap64: | |
2528 | andc. r4,r4,r0 ; Clean and test mapping address | |
2529 | beq hcmNoMap64 ; Did not find one... | |
2530 | ||
2531 | lwz r0,mpPte(r4) ; Grab the offset to the PTE | |
2532 | rlwinm r0,r0,0,~mpHValid ; Clear out valid bit | |
2533 | stw r0,mpPte(r4) ; Store the invalidated pointer back | |
2534 | ||
2535 | ld r4,mpAlias(r4) ; Chain on to the next | |
2536 | li r0,ppLFAmask ; Get mask to clean up mapping pointer | |
2537 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
2538 | b hcmNextMap64 ; Check it out... | |
2539 | hcmNoMap64: | |
2540 | bdnz hcmNextPhys64 ; Do the next physent in this region... | |
2541 | b hcmNextRegion ; Then move on to the next region... | |
2542 | ||
2543 | ||
2544 | .align 5 | |
2545 | hcmDone: | |
2546 | mtlr r10 ; Restore the return | |
2547 | mtcr r9 ; Restore the condition register | |
2548 | bt++ pf64Bitb,hcmDone64 ; 64-bit version | |
2549 | hcmDone32: | |
2550 | mtmsr r11 ; Restore translation/mode/etc. | |
2551 | isync | |
2552 | blr ; Leave... | |
2553 | ||
2554 | hcmDone64: | |
2555 | mtmsrd r11 ; Restore translation/mode/etc. | |
2556 | isync | |
2557 | blr ; Leave... | |
2558 | ||
2559 | ||
1c79356b A |
2560 | |
2561 | /* | |
91447636 | 2562 | * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod) |
55e303ae A |
2563 | * walks all mappings for a physical page and performs | |
2564 | * specified operations on each. | |
1c79356b | 2565 | * |
55e303ae A |
2566 | * pp is unlocked physent |
2567 | * preop is operation to perform on physent before walk. This would be | |
2568 | * used to set cache attribute or protection | |
2569 | * op is the operation to perform on each mapping during walk | |
2570 | * postop is operation to perform on the physent after walk. This would be | |
2571 | * used to set or reset the RC bits. | |
91447636 A |
2572 | * opmod modifies the action taken on any connected PTEs visited during |
2573 | * the mapping walk. | |
55e303ae A |
2574 | * |
2575 | * We return the RC bits from before postop is run. | |
2576 | * | |
2577 | * Note that this is designed to be called from 32-bit mode with a stack. | |
1c79356b | 2578 | * |
55e303ae A |
2579 | * We disable translation and all interruptions here. This keeps us | |
2580 | * from having to worry about a deadlock due to having anything locked | |
2581 | * and needing it to process a fault. | |
d7e50217 | 2582 | * |
55e303ae A |
2583 | * We lock the physent, execute preop, and then walk each mapping in turn. |
2584 | * If there is a PTE, it is invalidated and the RC merged into the physent. | |
2585 | * Then we call the op function. | |
2586 | * Then we revalidate the PTE. | |
2587 | * Once all mappings are finished, we save the physent RC and call the | |
2588 | * postop routine. Then we unlock the physent and return the RC. | |
2589 | * | |
2590 | * | |
1c79356b A |
2591 | */ |
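/*
 * Editorial sketch (not part of the original source): the control flow below in
 * rough C, ignoring the opmod fast paths and using hypothetical helpers --
 * mapInvPte/revalidate stand in for mapInvPte32/64 plus the PTE restore, and
 * first_mapping/next_mapping for the ppLink/mpAlias chain walk:
 *
 *	mapPhysLock(pp);
 *	cont = preop(pp);
 *	for (mp = first_mapping(pp); cont && mp; mp = next_mapping(mp)) {
 *		pte = mapInvPte(mp);		// invalidate, merge RC into physent
 *		cont = op(mp);
 *		if (pte) revalidate(pte);	// put the PTE back, unlock the PCA
 *	}
 *	rc = pp->ppLink & (ppR | ppC);		// RC from before postop
 *	if (cont) postop(pp);			// postop only if we did not bail early
 *	mapPhysUnlock(pp);
 *	return rc;
 */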
2592 | ||
1c79356b | 2593 | .align 5 |
55e303ae A |
2594 | .globl EXT(hw_walk_phys) |
2595 | ||
2596 | LEXT(hw_walk_phys) | |
91447636 | 2597 | stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack |
55e303ae | 2598 | mflr r0 ; Save the link register |
91447636 A |
2599 | stw r24,FM_ARG0+0x00(r1) ; Save a register |
2600 | stw r25,FM_ARG0+0x04(r1) ; Save a register | |
2601 | stw r26,FM_ARG0+0x08(r1) ; Save a register | |
2602 | stw r27,FM_ARG0+0x0C(r1) ; Save a register | |
2603 | mr r24,r8 ; Save the parm | |
55e303ae | 2604 | mr r25,r7 ; Save the parm |
91447636 A |
2605 | stw r28,FM_ARG0+0x10(r1) ; Save a register |
2606 | stw r29,FM_ARG0+0x14(r1) ; Save a register | |
2607 | stw r30,FM_ARG0+0x18(r1) ; Save a register | |
2608 | stw r31,FM_ARG0+0x1C(r1) ; Save a register | |
2609 | stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
55e303ae A |
2610 | |
2611 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit | |
91447636 A |
2612 | |
2613 | mfsprg r26,0 ; (INSTRUMENTATION) | |
2614 | lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION) | |
2615 | addi r27,r27,1 ; (INSTRUMENTATION) | |
2616 | stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION) | |
2617 | la r26,hwWalkFull(r26) ; (INSTRUMENTATION) | |
2618 | slwi r12,r24,2 ; (INSTRUMENTATION) | |
2619 | lwzx r27,r26,r12 ; (INSTRUMENTATION) | |
2620 | addi r27,r27,1 ; (INSTRUMENTATION) | |
2621 | stwx r27,r26,r12 ; (INSTRUMENTATION) | |
55e303ae A |
2622 | |
2623 | mr r26,r11 ; Save the old MSR | |
2624 | lis r27,hi16(hwpOpBase) ; Get high order of op base | |
2625 | slwi r4,r4,7 ; Convert preop to displacement | |
2626 | ori r27,r27,lo16(hwpOpBase) ; Get low order of op base | |
2627 | slwi r5,r5,7 ; Convert op to displacement | |
2628 | add r12,r4,r27 ; Point to the preop routine | |
2629 | slwi r28,r6,7 ; Convert postop to displacement | |
2630 | mtctr r12 ; Set preop routine | |
2631 | add r28,r28,r27 ; Get the address of the postop routine | |
2632 | add r27,r5,r27 ; Get the address of the op routine | |
1c79356b | 2633 | |
55e303ae | 2634 | bl mapPhysLock ; Lock the physent |
1c79356b | 2635 | |
55e303ae A |
2636 | mr r29,r3 ; Save the physent address |
2637 | ||
2638 | bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint) | |
2639 | ||
2640 | bctrl ; Call preop routine | |
2641 | bne- hwpEarly32 ; preop says to bail now... | |
91447636 A |
2642 | |
2643 | cmplwi r24,hwpMergePTE ; Classify operation modifier | |
55e303ae A |
2644 | mtctr r27 ; Set up the op function address |
2645 | lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping | |
91447636 A |
2646 | blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping |
2647 | beq hwpMSrc32 ; Do TLB merge for each mapping | |
2648 | ||
3a60a9f5 | 2649 | hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address |
91447636 | 2650 | beq hwpNone32 ; Did not find one... |
55e303ae | 2651 | |
91447636 A |
2652 | bctrl ; Call the op function |
2653 | ||
2654 | bne- hwpEarly32 ; op says to bail now... | |
2655 | lwz r31,mpAlias+4(r31) ; Chain on to the next | |
2656 | b hwpQSrc32 ; Check it out... | |
2657 | ||
2658 | .align 5 | |
3a60a9f5 | 2659 | hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address |
55e303ae | 2660 | beq hwpNone32 ; Did not find one... |
91447636 A |
2661 | |
2662 | bl mapMergeRC32 ; Merge reference and change into mapping and physent | |
2663 | bctrl ; Call the op function | |
2664 | ||
2665 | bne- hwpEarly32 ; op says to bail now... | |
2666 | lwz r31,mpAlias+4(r31) ; Chain on to the next | |
2667 | b hwpMSrc32 ; Check it out... | |
d7e50217 | 2668 | |
91447636 A |
2669 | .align 5 |
2670 | hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address | |
2671 | beq hwpNone32 ; Did not find one... | |
2672 | ||
55e303ae A |
2673 | ; |
2674 | ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4, | |
2675 | ; PTE low in R5. The PCA address is in R7. The PTEG comes back locked. | |
2676 | ; If there is no PTE, PTE low is obtained from mapping | |
2677 | ; | |
2678 | bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent | |
2679 | ||
2680 | bctrl ; Call the op function | |
2681 | ||
2682 | crmove cr1_eq,cr0_eq ; Save the return code | |
2683 | ||
2684 | mr. r3,r3 ; Was there a previously valid PTE? | |
2685 | beq- hwpNxt32 ; Nope... | |
1c79356b | 2686 | |
55e303ae A |
2687 | stw r5,4(r3) ; Store second half of PTE |
2688 | eieio ; Make sure we do not reorder | |
2689 | stw r4,0(r3) ; Revalidate the PTE | |
2690 | ||
2691 | eieio ; Make sure all updates come first | |
2692 | stw r6,0(r7) ; Unlock the PCA | |
d7e50217 | 2693 | |
55e303ae A |
2694 | hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now... |
2695 | lwz r31,mpAlias+4(r31) ; Chain on to the next | |
2696 | b hwpSrc32 ; Check it out... | |
1c79356b | 2697 | |
55e303ae | 2698 | .align 5 |
1c79356b | 2699 | |
55e303ae | 2700 | hwpNone32: mtctr r28 ; Get the post routine address |
1c79356b | 2701 | |
55e303ae A |
2702 | lwz r30,ppLink+4(r29) ; Save the old RC |
2703 | mr r3,r29 ; Get the physent address | |
2704 | bctrl ; Call post routine | |
1c79356b | 2705 | |
55e303ae A |
2706 | bl mapPhysUnlock ; Unlock the physent |
2707 | ||
2708 | mtmsr r26 ; Restore translation/mode/etc. | |
2709 | isync | |
1c79356b | 2710 | |
55e303ae | 2711 | b hwpReturn ; Go restore registers and return... |
1c79356b | 2712 | |
55e303ae | 2713 | .align 5 |
1c79356b | 2714 | |
55e303ae A |
2715 | hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC |
2716 | mr r3,r29 ; Get the physent address | |
2717 | bl mapPhysUnlock ; Unlock the physent | |
2718 | ||
2719 | mtmsr r26 ; Restore translation/mode/etc. | |
2720 | isync | |
2721 | ||
2722 | b hwpReturn ; Go restore registers and return... | |
1c79356b | 2723 | |
55e303ae | 2724 | .align 5 |
1c79356b | 2725 | |
55e303ae A |
2726 | hwp64: bctrl ; Call preop routine |
2727 | bne-- hwpEarly64 ; preop says to bail now... | |
d7e50217 | 2728 | |
91447636 | 2729 | cmplwi r24,hwpMergePTE ; Classify operation modifier |
55e303ae A |
2730 | mtctr r27 ; Set up the op function address |
2731 | ||
91447636 | 2732 | li r24,ppLFAmask |
55e303ae | 2733 | ld r31,ppLink(r3) ; Get the pointer to the first mapping |
91447636 A |
2734 | rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
2735 | blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping | |
2736 | beq hwpMSrc64 ; Do TLB merge for each mapping | |
55e303ae | 2737 | |
91447636 A |
2738 | hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address |
2739 | beq hwpNone64 ; Did not find one... | |
2740 | ||
2741 | bctrl ; Call the op function | |
2742 | ||
2743 | bne-- hwpEarly64 ; op says to bail now... | |
2744 | ld r31,mpAlias(r31) ; Chain on to the next | |
2745 | b hwpQSrc64 ; Check it out... | |
2746 | ||
2747 | .align 5 | |
2748 | hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address | |
2749 | beq hwpNone64 ; Did not find one... | |
2750 | ||
2751 | bl mapMergeRC64 ; Merge reference and change into mapping and physent | |
2752 | bctrl ; Call the op function | |
2753 | ||
2754 | bne-- hwpEarly64 ; op says to bail now... | |
2755 | ld r31,mpAlias(r31) ; Chain on to the next | |
2756 | b hwpMSrc64 ; Check it out... | |
2757 | ||
2758 | .align 5 | |
2759 | hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address | |
55e303ae A |
2760 | beq hwpNone64 ; Did not find one... |
2761 | ; | |
2762 | ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4, | |
2763 | ; PTE low in R5. PTEG comes back locked if there is one | |
2764 | ; | |
2765 | bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent | |
1c79356b | 2766 | |
55e303ae | 2767 | bctrl ; Call the op function |
1c79356b | 2768 | |
55e303ae | 2769 | crmove cr1_eq,cr0_eq ; Save the return code |
1c79356b | 2770 | |
55e303ae A |
2771 | mr. r3,r3 ; Was there a previously valid PTE? |
2772 | beq-- hwpNxt64 ; Nope... | |
2773 | ||
2774 | std r5,8(r3) ; Save bottom of PTE | |
2775 | eieio ; Make sure we do not reorder | |
2776 | std r4,0(r3) ; Revalidate the PTE | |
d7e50217 | 2777 | |
55e303ae A |
2778 | eieio ; Make sure all updates come first |
2779 | stw r6,0(r7) ; Unlock the PCA | |
2780 | ||
2781 | hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now... | |
2782 | ld r31,mpAlias(r31) ; Chain on to the next | |
55e303ae | 2783 | b hwpSrc64 ; Check it out... |
1c79356b | 2784 | |
55e303ae A |
2785 | .align 5 |
2786 | ||
2787 | hwpNone64: mtctr r28 ; Get the post routine address | |
2788 | ||
2789 | lwz r30,ppLink+4(r29) ; Save the old RC | |
2790 | mr r3,r29 ; Get the physent address | |
2791 | bctrl ; Call post routine | |
2792 | ||
2793 | bl mapPhysUnlock ; Unlock the physent | |
2794 | ||
2795 | mtmsrd r26 ; Restore translation/mode/etc. | |
1c79356b | 2796 | isync |
55e303ae A |
2797 | b hwpReturn ; Go restore registers and return... |
2798 | ||
2799 | .align 5 | |
1c79356b | 2800 | |
55e303ae A |
2801 | hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC |
2802 | mr r3,r29 ; Get the physent address | |
2803 | bl mapPhysUnlock ; Unlock the physent | |
2804 | ||
2805 | mtmsrd r26 ; Restore translation/mode/etc. | |
2806 | isync | |
2807 | ||
91447636 A |
2808 | hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return |
2809 | lwz r24,FM_ARG0+0x00(r1) ; Restore a register | |
2810 | lwz r25,FM_ARG0+0x04(r1) ; Restore a register | |
2811 | lwz r26,FM_ARG0+0x08(r1) ; Restore a register | |
55e303ae | 2812 | mr r3,r30 ; Pass back the RC |
91447636 A |
2813 | lwz r27,FM_ARG0+0x0C(r1) ; Restore a register |
2814 | lwz r28,FM_ARG0+0x10(r1) ; Restore a register | |
55e303ae | 2815 | mtlr r0 ; Restore the return |
91447636 A |
2816 | lwz r29,FM_ARG0+0x14(r1) ; Restore a register |
2817 | lwz r30,FM_ARG0+0x18(r1) ; Restore a register | |
2818 | lwz r31,FM_ARG0+0x1C(r1) ; Restore a register | |
55e303ae A |
2819 | lwz r1,0(r1) ; Pop the stack |
2820 | blr ; Leave... | |
d7e50217 | 2821 | |
d7e50217 | 2822 | |
55e303ae A |
2823 | ; |
2824 | ; The preop/op/postop function table. | |
2825 | ; Each function must be 128-byte aligned (.align 7) and be no more than | |
2826 | ; 32 instructions. If more than 32, we must fix address calculations | |
2827 | ; at the start of hwpOpBase | |
2828 | ; | |
2829 | ; The routine must set CR0_EQ in order to continue scan. | |
2830 | ; If CR0_EQ is not set, an early return from the function is made. | |
2831 | ; | |
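;
;			Editorial note (not original text): hw_walk_phys selects slot n of
;			this table with
;
;				target = hwpOpBase + (n << 7);	// slwi rX,rN,7 ; add rX,rX,r27
;
;			so each slot is 128 bytes (32 instructions), and the
;			".set .,hwpOpBase+(n*128)" directives below make the assembler
;			error out if a function overflows its slot.
;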
d7e50217 | 2832 | |
55e303ae A |
2833 | .align 7 |
2834 | ||
2835 | hwpOpBase: | |
2836 | ||
2837 | ; Function 0 - No operation | |
2838 | ||
2839 | hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set | |
2840 | blr ; Just return... | |
1c79356b A |
2841 | |
2842 | .align 5 | |
1c79356b | 2843 | |
55e303ae | 2844 | ; This is the continuation of function 4 - Set attributes in mapping |
1c79356b | 2845 | |
55e303ae A |
2846 | ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes. |
2847 | ; NOTE: Do we have to deal with i-cache here? | |
2848 | ||
91447636 | 2849 | hwpSAM: li r11,4096 ; Get page size |
d7e50217 | 2850 | |
55e303ae A |
2851 | hwpSAMinvd: sub. r11,r11,r9 ; Back off a line |
2852 | dcbf r11,r5 ; Flush the line in the data cache | |
2853 | bgt++ hwpSAMinvd ; Go do the rest of it... | |
2854 | ||
2855 | sync ; Make sure it is done | |
1c79356b | 2856 | |
91447636 | 2857 | li r11,4096 ; Get page size |
55e303ae A |
2858 | |
2859 | hwpSAMinvi: sub. r11,r11,r9 ; Back off a line | |
2860 | icbi r11,r5 ; Flush the line in the icache | |
2861 | bgt++ hwpSAMinvi ; Go do the rest of it... | |
2862 | ||
2863 | sync ; Make sure it is done | |
1c79356b | 2864 | |
55e303ae A |
2865 | cmpw r0,r0 ; Make sure we return CR0_EQ |
2866 | blr ; Return... | |
1c79356b | 2867 | |
1c79356b | 2868 | |
91447636 | 2869 | ; Function 1 - Set protection in physent (obsolete) |
1c79356b | 2870 | |
55e303ae A |
2871 | .set .,hwpOpBase+(1*128) ; Generate error if previous function too long |
2872 | ||
91447636 | 2873 | hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ |
55e303ae | 2874 | blr ; Return... |
1c79356b | 2875 | |
1c79356b | 2876 | |
55e303ae | 2877 | ; Function 2 - Set protection in mapping |
1c79356b | 2878 | |
55e303ae | 2879 | .set .,hwpOpBase+(2*128) ; Generate error if previous function too long |
1c79356b | 2880 | |
55e303ae A |
2881 | hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags |
2882 | lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping | |
2883 | rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent? | |
8ad349bb | 2884 | li r0,lo16(mpN|mpPP) ; Get no-execute and protection bits |
55e303ae | 2885 | crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent |
8ad349bb | 2886 | rlwinm r2,r25,0,mpNb-32,mpPPe-32 ; Isolate new no-execute and protection bits |
55e303ae | 2887 | beqlr-- ; Leave if permanent mapping (before we trash R5)... |
8ad349bb A |
2888 | andc r5,r5,r0 ; Clear the old no-execute and prot bits |
2889 | or r5,r5,r2 ; Move in the new no-execute and prot bits | |
55e303ae A |
2890 | rlwimi r8,r5,0,20,31 ; Copy into the mapping copy |
2891 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
2892 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2893 | blr ; Leave... | |
2894 | ||
2895 | ; Function 3 - Set attributes in physent | |
1c79356b | 2896 | |
55e303ae | 2897 | .set .,hwpOpBase+(3*128) ; Generate error if previous function too long |
1c79356b | 2898 | |
91447636 | 2899 | hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent |
1c79356b | 2900 | |
55e303ae | 2901 | hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags |
91447636 | 2902 | rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes |
55e303ae A |
2903 | stwcx. r4,r5,r29 ; Try to stuff it |
2904 | bne-- hwpSAtrPhX ; Try again... | |
2905 | ; Note: CR0_EQ is set because of stwcx. | |
2906 | blr ; Return... | |
de355530 | 2907 | |
55e303ae | 2908 | ; Function 4 - Set attributes in mapping |
d7e50217 | 2909 | |
55e303ae A |
2910 | .set .,hwpOpBase+(4*128) ; Generate error if previous function too long |
2911 | ||
2912 | hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags | |
2913 | lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping | |
91447636 | 2914 | li r2,mpM ; Force on coherent |
55e303ae A |
2915 | rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent? |
2916 | li r0,lo16(mpWIMG) ; Get wimg mask | |
2917 | crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent | |
91447636 A |
2918 | rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32 |
2919 | ; Copy in the cache inhibited bit | |
55e303ae A |
2920 | beqlr-- ; Leave if permanent mapping (before we trash R5)... |
2921 | andc r5,r5,r0 ; Clear the old wimg | |
91447636 A |
2922 | rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32 |
2923 | ; Copy in the guarded bit | |
55e303ae A |
2924 | mfsprg r9,2 ; Feature flags |
2925 | or r5,r5,r2 ; Move in the new wimg | |
2926 | rlwimi r8,r5,0,20,31 ; Copy into the mapping copy | |
2927 | lwz r2,mpPAddr(r31) ; Get the physical address | |
2928 | li r0,0xFFF ; Start a mask | |
2929 | andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size | |
2930 | rlwinm r5,r0,0,1,0 ; Copy to top half | |
2931 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2932 | rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left | |
2933 | and r5,r5,r2 ; Clean stuff in top 32 bits | |
2934 | andc r2,r2,r0 ; Clean bottom too | |
2935 | rlwimi r5,r2,0,0,31 ; Insert low 32 bits to make full physical address | |
2936 | b hwpSAM ; Join common | |
1c79356b | 2937 | |
55e303ae A |
2938 | ; NOTE: we moved the remainder of the code out of here because it |
2939 | ; did not fit in the 128 bytes allotted. It got stuck into the free space | |
2940 | ; at the end of the no-op function. | |
2941 | ||
2942 | ||
2943 | ||
de355530 | 2944 | |
55e303ae | 2945 | ; Function 5 - Clear reference in physent |
1c79356b | 2946 | |
55e303ae | 2947 | .set .,hwpOpBase+(5*128) ; Generate error if previous function too long |
1c79356b | 2948 | |
55e303ae | 2949 | hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
1c79356b | 2950 | |
55e303ae | 2951 | hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags |
91447636 | 2952 | rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R |
55e303ae A |
2953 | stwcx. r4,r5,r29 ; Try to stuff it |
2954 | bne-- hwpCRefPhX ; Try again... | |
2955 | ; Note: CR0_EQ is set because of stwcx. | |
2956 | blr ; Return... | |
1c79356b A |
2957 | |
2958 | ||
55e303ae | 2959 | ; Function 6 - Clear reference in mapping |
1c79356b | 2960 | |
55e303ae | 2961 | .set .,hwpOpBase+(6*128) ; Generate error if previous function too long |
1c79356b | 2962 | |
55e303ae A |
2963 | hwpCRefMap: li r0,lo16(mpR) ; Get reference bit |
2964 | lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
2965 | andc r5,r5,r0 ; Clear in PTE copy | |
2966 | andc r8,r8,r0 ; and in the mapping | |
2967 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
2968 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2969 | blr ; Return... | |
1c79356b | 2970 | |
de355530 | 2971 | |
55e303ae | 2972 | ; Function 7 - Clear change in physent |
1c79356b | 2973 | |
55e303ae | 2974 | .set .,hwpOpBase+(7*128) ; Generate error if previous function too long |
1c79356b | 2975 | |
55e303ae | 2976 | hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
1c79356b | 2977 | |
55e303ae A |
2978 | hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags |
2979 | rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C | |
2980 | stwcx. r4,r5,r29 ; Try to stuff it | |
2981 | bne-- hwpCCngPhX ; Try again... | |
2982 | ; Note: CR0_EQ is set because of stwcx. | |
2983 | blr ; Return... | |
1c79356b | 2984 | |
de355530 | 2985 | |
55e303ae | 2986 | ; Function 8 - Clear change in mapping |
1c79356b | 2987 | |
55e303ae A |
2988 | .set .,hwpOpBase+(8*128) ; Generate error if previous function too long |
2989 | ||
2990 | hwpCCngMap: li r0,lo16(mpC) ; Get change bit | |
2991 | lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
2992 | andc r5,r5,r0 ; Clear in PTE copy | |
2993 | andc r8,r8,r0 ; and in the mapping | |
2994 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
2995 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
2996 | blr ; Return... | |
d7e50217 | 2997 | |
de355530 | 2998 | |
55e303ae | 2999 | ; Function 9 - Set reference in physent |
d7e50217 | 3000 | |
55e303ae | 3001 | .set .,hwpOpBase+(9*128) ; Generate error if previous function too long |
d7e50217 | 3002 | |
55e303ae A |
3003 | hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
3004 | ||
3005 | hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags | |
3006 | ori r4,r4,lo16(ppR) ; Set the reference | |
3007 | stwcx. r4,r5,r29 ; Try to stuff it | |
3008 | bne-- hwpSRefPhX ; Try again... | |
3009 | ; Note: CR0_EQ is set because of stwcx. | |
3010 | blr ; Return... | |
d7e50217 | 3011 | |
1c79356b | 3012 | |
55e303ae | 3013 | ; Function 10 - Set reference in mapping |
d7e50217 | 3014 | |
55e303ae A |
3015 | .set .,hwpOpBase+(10*128) ; Generate error if previous function too long |
3016 | ||
3017 | hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
55e303ae A |
3018 | ori r8,r8,lo16(mpR) ; Set reference in mapping |
3019 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3020 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3021 | blr ; Return... | |
3022 | ||
3023 | ; Function 11 - Set change in physent | |
1c79356b | 3024 | |
55e303ae | 3025 | .set .,hwpOpBase+(11*128) ; Generate error if previous function too long |
1c79356b | 3026 | |
55e303ae | 3027 | hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent |
1c79356b | 3028 | |
55e303ae A |
3029 | hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags |
3030 | ori r4,r4,lo16(ppC) ; Set the change bit | |
3031 | stwcx. r4,r5,r29 ; Try to stuff it | |
3032 | bne-- hwpSCngPhX ; Try again... | |
3033 | ; Note: CR0_EQ is set because of stwcx. | |
3034 | blr ; Return... | |
de355530 | 3035 | |
55e303ae | 3036 | ; Function 12 - Set change in mapping |
1c79356b | 3037 | |
55e303ae | 3038 | .set .,hwpOpBase+(12*128) ; Generate error if previous function too long |
1c79356b | 3039 | |
55e303ae | 3040 | hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping |
55e303ae A |
3041 | ori r8,r8,lo16(mpC) ; Set change in mapping | |
3042 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3043 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3044 | blr ; Return... | |
1c79356b | 3045 | |
55e303ae | 3046 | ; Function 13 - Test reference in physent |
1c79356b | 3047 | |
55e303ae A |
3048 | .set .,hwpOpBase+(13*128) ; Generate error if previous function too long |
3049 | ||
3050 | hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent | |
3051 | rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0 | |
3052 | blr ; Return (CR0_EQ set to continue if reference is off)... | |
1c79356b | 3053 | |
1c79356b | 3054 | |
55e303ae | 3055 | ; Function 14 - Test reference in mapping |
1c79356b | 3056 | |
55e303ae | 3057 | .set .,hwpOpBase+(14*128) ; Generate error if previous function too long |
de355530 | 3058 | |
55e303ae A |
3059 | hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0 |
3060 | blr ; Return (CR0_EQ set to continue if reference is off)... | |
3061 | ||
91447636 | 3062 | |
55e303ae | 3063 | ; Function 15 - Test change in physent |
1c79356b | 3064 | |
55e303ae | 3065 | .set .,hwpOpBase+(15*128) ; Generate error if previous function too long |
1c79356b | 3066 | |
55e303ae A |
3067 | hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent |
3068 | rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0 | |
91447636 | 3069 | blr ; Return (CR0_EQ set to continue if change is off)... |
55e303ae A |
3070 | |
3071 | ||
3072 | ; Function 16 - Test change in mapping | |
3073 | ||
3074 | .set .,hwpOpBase+(16*128) ; Generate error if previous function too long | |
d7e50217 | 3075 | |
55e303ae | 3076 | hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0 |
91447636 A |
3077 | blr ; Return (CR0_EQ set to continue if change is off)... |
3078 | ||
3079 | ||
3080 | ; Function 17 - Test reference and change in physent | |
55e303ae A |
3081 | |
3082 | .set .,hwpOpBase+(17*128) ; Generate error if previous function too long | |
3083 | ||
91447636 A |
3084 | hwpTRefCngPhy: |
3085 | lwz r0,ppLink+4(r29) ; Get the flags from physent | |
3086 | rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits | |
3087 | cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1)) | |
3088 | crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0)) | |
3089 | blr ; Return (CR0_EQ set to continue if either R or C is off)... | |
3090 | ||
3091 | ||
3092 | ; Function 18 - Test reference and change in mapping | |
3093 | ||
3094 | .set .,hwpOpBase+(18*128) ; Generate error if previous function too long | |
3095 | hwpTRefCngMap: | |
3096 | rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping | |
3097 | cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1)) | |
3098 | crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0)) | |
3099 | blr ; Return (CR0_EQ set to continue if either R or C is off)... | |
3100 | ||
3101 | ||
3102 | ; Function 19 - Clear reference and change in physent | |
3103 | ||
3104 | .set .,hwpOpBase+(19*128) ; Generate error if previous function too long | |
3105 | hwpCRefCngPhy: | |
3106 | li r5,ppLink+4 ; Get offset for flag part of physent | |
3107 | ||
3108 | hwpCRefCngPhX: | |
3109 | lwarx r4,r5,r29 ; Get the old flags | |
3110 | andc r4,r4,r25 ; Clear R and C as specified by mask | |
3111 | stwcx. r4,r5,r29 ; Try to stuff it | |
3112 | bne-- hwpCRefCngPhX ; Try again... | |
3113 | ; Note: CR0_EQ is set because of stwcx. | |
3114 | blr ; Return... | |
3115 | ||
3116 | ||
3117 | ; Function 20 - Clear reference and change in mapping | |
3118 | ||
3119 | .set .,hwpOpBase+(20*128) ; Generate error if previous function too long | |
3120 | hwpCRefCngMap: | |
3121 | srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map) | |
3122 | lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping | |
3123 | andc r5,r5,r0 ; Clear in PTE copy | |
3124 | andc r8,r8,r0 ; and in the mapping | |
3125 | cmpw r0,r0 ; Make sure we return CR0_EQ | |
3126 | stw r8,mpVAddr+4(r31) ; Set the flag part of mapping | |
3127 | blr ; Return... | |
3128 | ||
d7e50217 | 3129 | |
91447636 | 3130 | .set .,hwpOpBase+(21*128) ; Generate error if previous function too long |
d7e50217 | 3131 | |
de355530 | 3132 | ; |
91447636 | 3133 | ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping. |
55e303ae A |
3134 | ; |
3135 | ; Returns: | |
3136 | ; mapRtOK - if all is ok | |
3137 | ; mapRtBadLk - if mapping lock fails | |
3138 | ; mapRtPerm - if mapping is permanent | |
3139 | ; mapRtNotFnd - if mapping is not found | |
3140 | ; mapRtBlock - if mapping is a block | |
de355530 | 3141 | ; |
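;			Editorial caller-side sketch (not original text); the codes are the
;			ones listed above and *nextva receives the next virtual address on
;			every path that reaches hpReturn:
;
;				addr64_t next;
;				switch (hw_protect(pmap, va, prot, &next)) {
;				case mapRtOK:		break;	/* protection updated */
;				case mapRtBadLk:	break;	/* search lock timed out */
;				case mapRtNotFnd:	break;	/* no mapping at va */
;				case mapRtBlock:	break;	/* block mapping, unchanged */
;				case mapRtPerm:		break;	/* permanent mapping, unchanged */
;				}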
55e303ae A |
3142 | .align 5 |
3143 | .globl EXT(hw_protect) | |
d7e50217 | 3144 | |
55e303ae A |
3145 | LEXT(hw_protect) |
3146 | stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack | |
3147 | mflr r0 ; Save the link register | |
3148 | stw r24,FM_ARG0+0x00(r1) ; Save a register | |
3149 | stw r25,FM_ARG0+0x04(r1) ; Save a register | |
3150 | mr r25,r7 ; Remember address of next va | |
3151 | stw r26,FM_ARG0+0x08(r1) ; Save a register | |
3152 | stw r27,FM_ARG0+0x0C(r1) ; Save a register | |
3153 | stw r28,FM_ARG0+0x10(r1) ; Save a register | |
3154 | mr r24,r6 ; Save the new protection flags | |
3155 | stw r29,FM_ARG0+0x14(r1) ; Save a register | |
3156 | stw r30,FM_ARG0+0x18(r1) ; Save a register | |
3157 | stw r31,FM_ARG0+0x1C(r1) ; Save a register | |
3158 | stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
1c79356b | 3159 | |
91447636 A |
3160 | #if DEBUG |
3161 | lwz r11,pmapFlags(r3) ; Get pmap's flags | |
3162 | rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active? | |
3163 | bne hpPanic ; Call not valid for guest shadow assist pmap | |
3164 | #endif | |
3165 | ||
55e303ae A |
3166 | lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap |
3167 | lwz r7,pmapvr+4(r3) ; Get the second part | |
d7e50217 | 3168 | |
d7e50217 | 3169 | |
55e303ae | 3170 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit |
9bccf70c | 3171 | |
55e303ae A |
3172 | mr r27,r11 ; Remember the old MSR |
3173 | mr r26,r12 ; Remember the feature bits | |
9bccf70c | 3174 | |
55e303ae | 3175 | xor r28,r3,r7 ; Change the common 32- and 64-bit half |
9bccf70c | 3176 | |
55e303ae A |
3177 | bf-- pf64Bitb,hpSF1 ; skip if 32-bit... |
3178 | ||
3179 | rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top | |
9bccf70c | 3180 | |
55e303ae A |
3181 | hpSF1: mr r29,r4 ; Save top half of vaddr |
3182 | mr r30,r5 ; Save the bottom half | |
3183 | ||
3184 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3185 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
3186 | mr. r3,r3 ; Did we get the lock? | |
3187 | bne-- hpBadLock ; Nope... | |
d7e50217 | 3188 | |
55e303ae A |
3189 | mr r3,r28 ; get the pmap address |
3190 | mr r4,r29 ; Get bits 0:31 to look for | |
3191 | mr r5,r30 ; Get bits 32:64 | |
de355530 | 3192 | |
55e303ae | 3193 | bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags) |
d7e50217 | 3194 | |
91447636 A |
3195 | rlwinm. r0,r7,0,mpType ; Is this a normal mapping? |
3196 | crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping | |
3197 | andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed? | |
3198 | cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed | |
55e303ae | 3199 | mr. r31,r3 ; Save the mapping if we found it |
55e303ae A |
3200 | mr r29,r4 ; Save next va high half |
3201 | mr r30,r5 ; Save next va low half | |
d7e50217 | 3202 | |
55e303ae | 3203 | beq-- hpNotFound ; Not found... |
de355530 | 3204 | |
91447636 | 3205 | bf-- cr1_eq,hpNotAllowed ; Something special is happening... |
d7e50217 | 3206 | |
55e303ae A |
3207 | bt++ pf64Bitb,hpDo64 ; Split for 64 bit |
3208 | ||
3209 | bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent | |
3210 | ||
91447636 | 3211 | rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit) |
55e303ae A |
3212 | mr. r3,r3 ; Was there a previously valid PTE? |
3213 | ||
3214 | stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest) | |
3215 | ||
3216 | beq-- hpNoOld32 ; Nope... | |
1c79356b | 3217 | |
55e303ae A |
3218 | stw r5,4(r3) ; Store second half of PTE |
3219 | eieio ; Make sure we do not reorder | |
3220 | stw r4,0(r3) ; Revalidate the PTE | |
3221 | ||
3222 | eieio ; Make sure all updates come first | |
3223 | stw r6,0(r7) ; Unlock PCA | |
3224 | ||
3225 | hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3226 | bl sxlkUnlock ; Unlock the search list | |
de355530 | 3227 | |
55e303ae A |
3228 | li r3,mapRtOK ; Set normal return |
3229 | b hpR32 ; Join common... | |
3230 | ||
3231 | .align 5 | |
1c79356b | 3232 | |
d7e50217 | 3233 | |
55e303ae A |
3234 | hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent |
3235 | ||
3236 | rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits | |
55e303ae A |
3237 | mr. r3,r3 ; Was there a previously valid PTE? |
3238 | ||
3239 | stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest) | |
3240 | ||
3241 | beq-- hpNoOld64 ; Nope... | |
d7e50217 | 3242 | |
55e303ae A |
3243 | std r5,8(r3) ; Store second half of PTE |
3244 | eieio ; Make sure we do not reorder | |
3245 | std r4,0(r3) ; Revalidate the PTE | |
de355530 | 3246 | |
55e303ae A |
3247 | eieio ; Make sure all updates come first |
3248 | stw r6,0(r7) ; Unlock PCA | |
de355530 | 3249 | |
55e303ae A |
3250 | hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3251 | bl sxlkUnlock ; Unlock the search list | |
de355530 | 3252 | |
55e303ae A |
3253 | li r3,mapRtOK ; Set normal return |
3254 | b hpR64 ; Join common... | |
de355530 | 3255 | |
55e303ae A |
3256 | .align 5 |
3257 | ||
3258 | hpReturn: bt++ pf64Bitb,hpR64 ; Yes... | |
3259 | ||
3260 | hpR32: mtmsr r27 ; Restore enables/translation/etc. | |
3261 | isync | |
3262 | b hpReturnC ; Join common... | |
3263 | ||
3264 | hpR64: mtmsrd r27 ; Restore enables/translation/etc. | |
3265 | isync | |
3266 | ||
3267 | hpReturnC: stw r29,0(r25) ; Save the top of the next va | |
3268 | stw r30,4(r25) ; Save the bottom of the next va | |
3269 | lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return | |
3270 | lwz r24,FM_ARG0+0x00(r1) ; Restore a register | |
3271 | lwz r25,FM_ARG0+0x04(r1) ; Restore a register | |
3272 | lwz r26,FM_ARG0+0x08(r1) ; Restore a register | |
3273 | mtlr r0 ; Restore the return | |
3274 | lwz r27,FM_ARG0+0x0C(r1) ; Restore a register | |
3275 | lwz r28,FM_ARG0+0x10(r1) ; Restore a register | |
3276 | lwz r29,FM_ARG0+0x14(r1) ; Restore a register | |
3277 | lwz r30,FM_ARG0+0x18(r1) ; Restore a register | |
3278 | lwz r31,FM_ARG0+0x1C(r1) ; Restore a register | |
3279 | lwz r1,0(r1) ; Pop the stack | |
3280 | blr ; Leave... | |
3281 | ||
3282 | .align 5 | |
3283 | ||
3284 | hpBadLock: li r3,mapRtBadLk ; Set lock time out error code | |
3285 | b hpReturn ; Leave.... | |
d7e50217 | 3286 | |
55e303ae A |
3287 | hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3288 | bl sxlkUnlock ; Unlock the search list | |
d7e50217 | 3289 | |
55e303ae A |
3290 | li r3,mapRtNotFnd ; Set that we did not find the requested page |
3291 | b hpReturn ; Leave.... | |
3292 | ||
3293 | hpNotAllowed: | |
3294 | rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed? | |
3295 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3296 | bne-- hpNotFound ; Yeah... | |
3297 | bl sxlkUnlock ; Unlock the search list | |
3298 | ||
3299 | li r3,mapRtBlock ; Assume it was a block | |
91447636 A |
3300 | rlwinm r0,r7,0,mpType ; Isolate mapping type |
3301 | cmplwi r0,mpBlock ; Is this a block mapping? | |
3302 | beq++ hpReturn ; Yes, leave... | |
55e303ae A |
3303 | |
3304 | li r3,mapRtPerm ; Set that we hit a permanent page | |
3305 | b hpReturn ; Leave.... | |
9bccf70c | 3306 | |
91447636 A |
3307 | hpPanic: lis r0,hi16(Choke) ; System abend |
3308 | ori r0,r0,lo16(Choke) ; System abend | |
3309 | li r3,failMapping ; Show that we failed some kind of mapping thing | |
3310 | sc | |
3311 | ||
9bccf70c | 3312 | |
55e303ae A |
3313 | ; |
3314 | ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va | |
3315 | ; | |
3316 | ; Returns following code ORed with RC from mapping | |
3317 | ; mapRtOK - if all is ok | |
3318 | ; mapRtBadLk - if mapping lock fails | |
3319 | ; mapRtNotFnd - if mapping is not found | |
3320 | ; | |
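;			Editorial caller-side sketch (not original text): the status code
;			comes back ORed with the mapping's R and C bits, so a caller masks
;			with mpR|mpC to separate them (this assumes the mapRt* codes and
;			mpR/mpC occupy disjoint bit positions):
;
;				int ret = hw_test_rc(pmap, va, 1);		/* test and reset */
;				unsigned int rc = ret & (mpR | mpC);		/* reference/change */
;				unsigned int status = ret & ~(mpR | mpC);	/* mapRtOK etc. */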
3321 | .align 5 | |
3322 | .globl EXT(hw_test_rc) | |
9bccf70c | 3323 | |
55e303ae A |
3324 | LEXT(hw_test_rc) |
3325 | stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack | |
3326 | mflr r0 ; Save the link register | |
3327 | stw r24,FM_ARG0+0x00(r1) ; Save a register | |
3328 | stw r25,FM_ARG0+0x04(r1) ; Save a register | |
3329 | stw r26,FM_ARG0+0x08(r1) ; Save a register | |
3330 | stw r27,FM_ARG0+0x0C(r1) ; Save a register | |
3331 | stw r28,FM_ARG0+0x10(r1) ; Save a register | |
3332 | mr r24,r6 ; Save the reset request | |
3333 | stw r29,FM_ARG0+0x14(r1) ; Save a register | |
3334 | stw r30,FM_ARG0+0x18(r1) ; Save a register | |
3335 | stw r31,FM_ARG0+0x1C(r1) ; Save a register | |
3336 | stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
9bccf70c | 3337 | |
91447636 A |
3338 | #if DEBUG |
3339 | lwz r11,pmapFlags(r3) ; Get pmap's flags | |
3340 | rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active? | |
3341 | bne htrPanic ; Call not valid for guest shadow assist pmap | |
3342 | #endif | |
3343 | ||
55e303ae A |
3344 | lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap |
3345 | lwz r7,pmapvr+4(r3) ; Get the second part | |
0b4e3aa0 | 3346 | |
9bccf70c | 3347 | |
55e303ae | 3348 | bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit |
9bccf70c | 3349 | |
55e303ae A |
3350 | mr r27,r11 ; Remember the old MSR |
3351 | mr r26,r12 ; Remember the feature bits | |
9bccf70c | 3352 | |
55e303ae | 3353 | xor r28,r3,r7 ; Change the common 32- and 64-bit half |
9bccf70c | 3354 | |
55e303ae | 3355 | bf-- pf64Bitb,htrSF1 ; skip if 32-bit... |
1c79356b | 3356 | |
55e303ae | 3357 | rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top |
1c79356b | 3358 | |
55e303ae A |
3359 | htrSF1: mr r29,r4 ; Save top half of vaddr |
3360 | mr r30,r5 ; Save the bottom half | |
1c79356b | 3361 | |
55e303ae A |
3362 | la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3363 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
3364 | mr. r3,r3 ; Did we get the lock? | |
3365 | li r25,0 ; Clear RC | |
3366 | bne-- htrBadLock ; Nope... | |
3367 | ||
3368 | mr r3,r28 ; get the pmap address | |
3369 | mr r4,r29 ; Get bits 0:31 to look for | |
3370 | mr r5,r30 ; Get bits 32:64 | |
d7e50217 | 3371 | |
55e303ae | 3372 | bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags) |
9bccf70c | 3373 | |
91447636 A |
3374 | rlwinm. r0,r7,0,mpType ; Is this a normal mapping? |
3375 | crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping | |
3376 | andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed? | |
3377 | crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed | |
55e303ae | 3378 | mr. r31,r3 ; Save the mapping if we found it |
91447636 | 3379 | crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed |
d7e50217 | 3380 | |
91447636 | 3381 | bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed... |
1c79356b | 3382 | |
55e303ae A |
3383 | bt++ pf64Bitb,htrDo64 ; Split for 64 bit |
3384 | ||
3385 | bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent | |
3386 | ||
3387 | cmplwi cr1,r24,0 ; Do we want to clear RC? | |
3388 | lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field | |
3389 | mr. r3,r3 ; Was there a previously valid PTE? | |
3390 | li r0,lo16(mpR|mpC) ; Get bits to clear | |
9bccf70c | 3391 | |
55e303ae A |
3392 | and r25,r5,r0 ; Save the RC bits |
3393 | beq++ cr1,htrNoClr32 ; Nope... | |
3394 | ||
3395 | andc r12,r12,r0 ; Clear mapping copy of RC | |
3396 | andc r5,r5,r0 ; Clear PTE copy of RC | |
3397 | sth r12,mpVAddr+6(r31) ; Set the new RC | |
9bccf70c | 3398 | |
55e303ae | 3399 | htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE... |
d7e50217 | 3400 | |
55e303ae A |
3401 | sth r5,6(r3) ; Store updated RC |
3402 | eieio ; Make sure we do not reorder | |
3403 | stw r4,0(r3) ; Revalidate the PTE | |
9bccf70c | 3404 | |
55e303ae A |
3405 | eieio ; Make sure all updates come first |
3406 | stw r6,0(r7) ; Unlock PCA | |
1c79356b | 3407 | |
55e303ae A |
3408 | htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3409 | bl sxlkUnlock ; Unlock the search list | |
3410 | li r3,mapRtOK ; Set normal return | |
3411 | b htrR32 ; Join common... | |
1c79356b | 3412 | |
55e303ae A |
3413 | .align 5 |
3414 | ||
3415 | ||
3416 | htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent | |
3417 | ||
3418 | cmplwi cr1,r24,0 ; Do we want to clear RC? | |
3419 | lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field | |
3420 | mr. r3,r3 ; Was there a previously valid PTE? | |
3421 | li r0,lo16(mpR|mpC) ; Get bits to clear | |
1c79356b | 3422 | |
55e303ae A |
3423 | and r25,r5,r0 ; Save the RC bits |
3424 | beq++ cr1,htrNoClr64 ; Nope... | |
3425 | ||
3426 | andc r12,r12,r0 ; Clear mapping copy of RC | |
3427 | andc r5,r5,r0 ; Clear PTE copy of RC | |
3428 | sth r12,mpVAddr+6(r31) ; Set the new RC | |
1c79356b | 3429 | |
55e303ae A |
3430 | htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE... | |
3431 | ||
3432 | sth r5,14(r3) ; Store updated RC | |
3433 | eieio ; Make sure we do not reorder | |
3434 | std r4,0(r3) ; Revalidate the PTE | |
1c79356b | 3435 | |
55e303ae A |
3436 | eieio ; Make sure all updates come first |
3437 | stw r6,0(r7) ; Unlock PCA | |
1c79356b | 3438 | |
55e303ae A |
3439 | htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock |
3440 | bl sxlkUnlock ; Unlock the search list | |
3441 | li r3,mapRtOK ; Set normal return | |
3442 | b htrR64 ; Join common... | |
de355530 | 3443 | |
55e303ae A |
3444 | .align 5 |
3445 | ||
3446 | htrReturn: bt++ pf64Bitb,htrR64 ; Yes... | |
de355530 | 3447 | |
55e303ae A |
3448 | htrR32: mtmsr r27 ; Restore enables/translation/etc. |
3449 | isync | |
3450 | b htrReturnC ; Join common... | |
de355530 | 3451 | |
55e303ae A |
3452 | htrR64: mtmsrd r27 ; Restore enables/translation/etc. |
3453 | isync | |
1c79356b | 3454 | |
55e303ae A |
3455 | htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return | |
3456 | or r3,r3,r25 ; Send the RC bits back | |
3457 | lwz r24,FM_ARG0+0x00(r1) ; Restore a register | |
3458 | lwz r25,FM_ARG0+0x04(r1) ; Restore a register | |
3459 | lwz r26,FM_ARG0+0x08(r1) ; Restore a register | |
3460 | mtlr r0 ; Restore the return | |
3461 | lwz r27,FM_ARG0+0x0C(r1) ; Restore a register | |
3462 | lwz r28,FM_ARG0+0x10(r1) ; Restore a register | |
3463 | lwz r29,FM_ARG0+0x14(r1) ; Restore a register | |
3464 | lwz r30,FM_ARG0+0x18(r1) ; Restore a register | |
3465 | lwz r31,FM_ARG0+0x1C(r1) ; Restore a register | |
3466 | lwz r1,0(r1) ; Pop the stack | |
1c79356b A |
3467 | blr ; Leave... |
3468 | ||
3469 | .align 5 | |
3470 | ||
55e303ae A |
3471 | htrBadLock: li r3,mapRtBadLk ; Set lock time out error code |
3472 | b htrReturn ; Leave.... | |
1c79356b | 3473 | |
55e303ae A |
3474 | htrNotFound: |
3475 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3476 | bl sxlkUnlock ; Unlock the search list | |
1c79356b | 3477 | |
55e303ae A |
3478 | li r3,mapRtNotFnd ; Set that we did not find the requested page |
3479 | b htrReturn ; Leave.... | |
3480 | ||
91447636 A |
3481 | htrPanic: lis r0,hi16(Choke) ; System abend |
3482 | ori r0,r0,lo16(Choke) ; System abend | |
3483 | li r3,failMapping ; Show that we failed some kind of mapping thing | |
3484 | sc | |
3485 | ||
3486 | ||
3487 | ; | |
3488 | ; | |
3489 | ; mapFindLockPN - find and lock physent for a given page number | |
3490 | ; | |
3491 | ; | |
3492 | .align 5 | |
3493 | mapFindLockPN: | |
3494 | lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
3495 | mr r2,r3 ; Save our target | |
3496 | ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table | |
3497 | ||
3498 | mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address | |
3499 | lwz r5,mrStart(r9) ; Get start of table entry | |
3500 | lwz r0,mrEnd(r9) ; Get end of table entry | |
3501 | addi r9,r9,mrSize ; Point to the next slot | |
3a60a9f5 | 3502 | cmplwi cr7,r3,0 ; Are we at the end of the table? |
91447636 A |
3503 | cmplw r2,r5 ; See if we are in this table |
3504 | cmplw cr1,r2,r0 ; Check end also | |
3505 | sub r4,r2,r5 ; Calculate index to physical entry | |
3a60a9f5 | 3506 | beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry... |
91447636 A |
3507 | cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry |
3508 | slwi r4,r4,3 ; Get offset to physical entry | |
3509 | ||
3510 | blt-- mapFLPNitr ; Did not find it... | |
3511 | ||
3512 | add r3,r3,r4 ; Point right to the slot | |
3513 | b mapPhysLock ; Join common lock code | |
3514 | ||
3515 | mapFLPNmiss: | |
3516 | li r3,0 ; Show that we did not find it | |
3517 | blr ; Leave... | |
3518 | ||
3519 | ||
3520 | ; | |
55e303ae A |
3521 | ; mapPhysFindLock - find physent list and lock it |
3522 | ; R31 points to mapping | |
3523 | ; | |
3524 | .align 5 | |
3525 | ||
3526 | mapPhysFindLock: | |
3527 | lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table | |
3528 | lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part) | |
91447636 | 3529 | rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset |
55e303ae A |
3530 | addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry |
3531 | add r3,r3,r4 ; Point to table entry | |
3532 | lwz r5,mpPAddr(r31) ; Get physical page number | |
3533 | lwz r7,mrStart(r3) ; Get the start of range | |
3534 | lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank | |
3535 | sub r6,r5,r7 ; Get index to physent | |
3536 | rlwinm r6,r6,3,0,28 ; Get offset to physent | |
3537 | add r3,r3,r6 ; Point right to the physent | |
3538 | b mapPhysLock ; Join in the lock... | |
3539 | ||
3540 | ; | |
3541 | ; mapPhysLock - lock a physent list | |
3542 | ; R3 contains list header | |
3543 | ; | |
3544 | .align 5 | |
3545 | ||
3546 | mapPhysLockS: | |
3547 | li r2,lgKillResv ; Get a spot to kill reservation | |
3548 | stwcx. r2,0,r2 ; Kill it... | |
3549 | ||
3550 | mapPhysLockT: | |
3551 | lwz r2,ppLink(r3) ; Get physent chain header | |
3552 | rlwinm. r2,r2,0,0,0 ; Is lock clear? | |
3553 | bne-- mapPhysLockT ; Nope, still locked... | |
3554 | ||
3555 | mapPhysLock: | |
3556 | lwarx r2,0,r3 ; Get the lock | |
3557 | rlwinm. r0,r2,0,0,0 ; Is it locked? | |
3558 | oris r0,r2,0x8000 ; Set the lock bit | |
3559 | bne-- mapPhysLockS ; It is locked, spin on it... | |
3560 | stwcx. r0,0,r3 ; Try to stuff it back... | |
3561 | bne-- mapPhysLock ; Collision, try again... | |
3562 | isync ; Clear any speculations | |
3563 | blr ; Leave... | |
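;
;			Editorial sketch (not original text) of the protocol above;
;			load_reserved, store_conditional, and kill_reservation are
;			hypothetical stand-ins for lwarx, stwcx., and the lgKillResv store:
;
;				for (;;) {
;					old = load_reserved(&pp->ppLink);	/* lwarx */
;					if ((old & 0x80000000) == 0 &&
;					    store_conditional(&pp->ppLink, old | 0x80000000))
;						break;				/* got the lock */
;					kill_reservation();			/* stwcx. to lgKillResv */
;					while (pp->ppLink & 0x80000000)		/* spin on plain loads */
;						continue;
;				}
;				isync();	/* no speculation past the lock */
;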
3564 | ||
3565 | ||
3566 | ; | |
3567 | ; mapPhysUnlock - unlock a physent list | |
3568 | ; R3 contains list header | |
3569 | ; | |
3570 | .align 5 | |
3571 | ||
3572 | mapPhysUnlock: | |
3573 | lwz r0,ppLink(r3) ; Get physent chain header | |
3574 | rlwinm r0,r0,0,1,31 ; Clear the lock bit | |
3575 | eieio ; Make sure unlock comes last | |
3576 | stw r0,ppLink(r3) ; Unlock the list | |
3577 | blr ; Leave... | |
3578 | ||
3579 | ; | |
3580 | ; mapPhysMerge - merge the RC bits into the master copy | |
3581 | ; R3 points to the physent | |
3582 | ; R4 contains the RC bits | |
3583 | ; | |
3584 | ; Note: we just return if RC is 0 | |
3585 | ; | |
3586 | .align 5 | |
3587 | ||
3588 | mapPhysMerge: | |
3589 | rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits | |
3590 | la r5,ppLink+4(r3) ; Point to the RC field | |
3591 | beqlr-- ; Leave if RC is 0... | |
3592 | ||
3593 | mapPhysMergeT: | |
3594 | lwarx r6,0,r5 ; Get the RC part | |
3595 | or r6,r6,r4 ; Merge in the RC | |
3596 | stwcx. r6,0,r5 ; Try to stuff it back... | |
3597 | bne-- mapPhysMergeT ; Collision, try again... | |
3598 | blr ; Leave... | |
3599 | ||
3600 | ; | |
3601 | ; Sets the physent link pointer and preserves all flags | |
3602 | ; The list is locked | |
3603 | ; R3 points to physent | |
3604 | ; R4 has link to set | |
3605 | ; | |
3606 | ||
3607 | .align 5 | |
3608 | ||
3609 | mapPhyCSet32: | |
3610 | la r5,ppLink+4(r3) ; Point to the link word | |
3611 | ||
3612 | mapPhyCSetR: | |
3613 | lwarx r2,0,r5 ; Get the link and flags | |
91447636 | 3614 | rlwimi r4,r2,0,ppFlags ; Insert the flags |
55e303ae A |
3615 | stwcx. r4,0,r5 ; Stick them back |
3616 | bne-- mapPhyCSetR ; Someone else did something, try again... | |
3617 | blr ; Return... | |
3618 | ||
3619 | .align 5 | |
3620 | ||
3621 | mapPhyCSet64: | |
91447636 A |
3622 | li r0,ppLFAmask ; Get mask to clean up mapping pointer |
3623 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
55e303ae A |
3624 | |
3625 | mapPhyCSet64x: | |
3626 | ldarx r2,0,r3 ; Get the link and flags | |
3627 | and r5,r2,r0 ; Isolate the flags | |
3628 | or r6,r4,r5 ; Add them to the link | |
3629 | stdcx. r6,0,r3 ; Stick them back | |
3630 | bne-- mapPhyCSet64x ; Someone else did something, try again... | |
3631 | blr ; Return... | |
3632 | ||
3633 | ; | |
3634 | ; mapBumpBusy - increment the busy count on a mapping | |
3635 | ; R3 points to mapping | |
3636 | ; | |
3637 | ||
3638 | .align 5 | |
3639 | ||
3640 | mapBumpBusy: | |
3641 | lwarx r4,0,r3 ; Get mpBusy | |
3642 | addis r4,r4,0x0100 ; Bump the busy count | |
3643 | stwcx. r4,0,r3 ; Save it back | |
3644 | bne-- mapBumpBusy ; This did not work, try again... | |
3645 | blr ; Leave... | |
3646 | ||
3647 | ; | |
3648 | ; mapDropBusy - decrement the busy count on a mapping | |
3649 | ; R3 points to mapping | |
3650 | ; | |
3651 | ||
3652 | .globl EXT(mapping_drop_busy) | |
3653 | .align 5 | |
3654 | ||
3655 | LEXT(mapping_drop_busy) | |
3656 | mapDropBusy: | |
3657 | lwarx r4,0,r3 ; Get mpBusy | |
3658 | addis r4,r4,0xFF00 ; Drop the busy count | |
3659 | stwcx. r4,0,r3 ; Save it back | |
3660 | bne-- mapDropBusy ; This did not work, try again... | |
3661 | blr ; Leave... | |
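;
;			Editorial note (not original text): the busy count lives in the
;			high-order byte of the mpFlags word (mapDrainBusy below extracts it
;			with rlwinm r4,r4,8,24,31), so the addis immediates are byte
;			increments: 0x0100 << 16 adds one to that byte, while 0xFF00
;			sign-extends so that 0xFF00 << 16 subtracts one from it:
;
;				bump:	mpFlags += 0x01000000;	/* busy byte + 1 */
;				drop:	mpFlags -= 0x01000000;	/* busy byte - 1 */
;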
3662 | ||
3663 | ; | |
3664 | ; mapDrainBusy - drain the busy count on a mapping | |
3665 | ; R3 points to mapping | |
3666 | ; Note: we already have a busy for ourselves. Only one | |
3667 | ; busy per processor is allowed, so we just spin here | |
3668 | ; waiting for the count to drop to 1. | |
3669 | ; Also, the mapping cannot be on any lists when we do this | |
3670 | ; so all we are doing is waiting until it can be released. | |
3671 | ; | |
3672 | ||
3673 | .align 5 | |
3674 | ||
3675 | mapDrainBusy: | |
3676 | lwz r4,mpFlags(r3) ; Get mpBusy | |
3677 | rlwinm r4,r4,8,24,31 ; Clean it up | |
3678 | cmplwi r4,1 ; Is it just our busy? | |
3679 | beqlr++ ; Yeah, it is clear... | |
3680 | b mapDrainBusy ; Try again... | |
3681 | ||
3682 | ||
3683 | ||
3684 | ; | |
3685 | ; handleDSeg - handle a data segment fault | |
3686 | ; handleISeg - handle an instruction segment fault | |
3687 | ; | |
3688 | ; All that we do here is to map these to DSI or ISI and ensure | |
3689 | ; that the hash bit is not set. This forces the fault code | |
3690 | ; to also handle the missing segment. | |
3691 | ; | |
3692 | ; At entry R2 contains per_proc, R13 contains savarea pointer, | |
3693 | ; and R11 is the exception code. | |
3694 | ; | |
3695 | ||
3696 | .align 5 | |
3697 | .globl EXT(handleDSeg) | |
3698 | ||
3699 | LEXT(handleDSeg) | |
3700 | ||
3701 | li r11,T_DATA_ACCESS ; Change fault to DSI | |
3702 | stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss | |
3703 | b EXT(handlePF) ; Join common... | |
3704 | ||
3705 | .align 5 | |
3706 | .globl EXT(handleISeg) | |
3707 | ||
3708 | LEXT(handleISeg) | |
3709 | ||
3710 | li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI | |
3711 | stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss | |
3712 | b EXT(handlePF) ; Join common... | |
3713 | ||
3714 | ||
3715 | /* | |
3716 | * handlePF - handle a page fault interruption | |
3717 | * | |
3718 | * At entry R2 contains per_proc, R13 contains savarea pointer, | |
3719 | * and R11 is the exception code. | |
3720 | * | |
3721 | * This first part does a quick check to see if we can handle the fault. | |
3722 | * We cannot handle any kind of protection exceptions here, so we pass | |
3723 | * them up to the next level. | |
3724 | * | |
3725 | * NOTE: In order for a page-fault redrive to work, the translation miss | |
3726 | * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur | |
3727 | * before we come here. | |
3728 | */ | |
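;
;			Editorial sketch (not original text) of the entry filter implemented
;			just below: an instruction fetch has no DSISR, so one is constructed
;			from SRR1; any of the "cannot handle" bits passes the fault up:
;
;				dsisr = ifetch ? (srr1 & hi_bits(dsiValid)) : save->savedsisr;
;				if (dsisr & (dsiNoEx | dsiProt | dsiInvMode | dsiAC))
;					goto hpfExit;			/* pass up the fault */
;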
3729 | ||
3730 | .align 5 | |
3731 | .globl EXT(handlePF) | |
3732 | ||
3733 | LEXT(handlePF) | |
3734 | ||
3735 | mfsprg r12,2 ; Get feature flags | |
3736 | cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction | |
3737 | lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode | |
3738 | mtcrf 0x02,r12 ; move pf64Bit to cr6 | |
3739 | lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here | |
3740 | lwz r18,SAVflags(r13) ; Get the flags | |
3741 | ||
3742 | beq-- gotIfetch ; We have an IFETCH here... | |
3743 | ||
3744 | lwz r27,savedsisr(r13) ; Get the DSISR | |
3745 | lwz r29,savedar(r13) ; Get the first half of the DAR | |
3746 | lwz r30,savedar+4(r13) ; And second half | |
3747 | ||
3748 | b ckIfProt ; Go check if this is a protection fault... | |
3749 | ||
3750 | gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value | |
3751 | lwz r29,savesrr0(r13) ; Get the first half of the instruction address | |
3752 | lwz r30,savesrr0+4(r13) ; And second half | |
3753 | stw r27,savedsisr(r13) ; Save the "constructed" DSISR | |
3754 | ||
3755 | ckIfProt: and. r4,r27,r0 ; Is this an exception we cannot handle here? | |
3756 | li r20,64 ; Set a limit of 64 nests for sanity check | |
3757 | bne-- hpfExit ; Yes... (probably not though) | |
3758 | ||
3759 | ; |
3760 | ; Note: if the RI is on, we are accessing user space from the kernel, therefore we | |
3761 | ; should be loading the user pmap here. | |
3762 | ; | |
3763 | ||
3764 | andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space? | |
3765 | lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel | |
3766 | mr r19,r2 ; Remember the per_proc | |
3767 | ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address) | |
3768 | mr r23,r30 ; Save the low part of faulting address | |
3769 | beq-- hpfInKern ; Skip if we are in the kernel | |
3770 | la r8,ppUserPmap(r19) ; Point to the current user pmap | |
3771 | ||
3772 | hpfInKern: mr r22,r29 ; Save the high part of faulting address | |
3773 | ||
3774 | bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit... | |
3775 | ||
3776 | ; | |
3777 | ; On 32-bit machines we emulate a segment exception by loading unused SRs with a | |
3778 | ; predefined value that corresponds to no address space. When we see that value | |
3779 | ; we turn off the PTE miss bit in the DSISR to drive the code later on that will | |
3780 | ; cause the proper SR to be loaded. | |
3781 | ; | |
3782 | ||
3783 | lwz r28,4(r8) ; Pick up the pmap | |
3784 | rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive? | |
3785 | mr r25,r28 ; Save the original pmap (in case we nest) | |
3786 | lwz r0,pmapFlags(r28) ; Get pmap's flags |
3787 | bne hpfGVtest ; Segs are not ours if so... | |
3788 | mfsrin r4,r30 ; Get the SR that was used for translation |
3789 | cmplwi r4,invalSpace ; Is this a simulated segment fault? | |
3790 | bne++ hpfGVtest ; No... | |
3791 | |
3792 | rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR | |
3793 | b hpfGVtest ; Join on up... | |
3794 | |
3795 | .align 5 | |
3796 | ||
3797 | nop ; Push hpfNest to a 32-byte boundary | |
3798 | nop ; Push hpfNest to a 32-byte boundary | |
3799 | nop ; Push hpfNest to a 32-byte boundary | |
3800 | |
3801 | hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit) | |
3802 | mr r25,r28 ; Save the original pmap (in case we nest) | |
3803 | lwz r0,pmapFlags(r28) ; Get pmap's flags |
3804 | ||
3805 | hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist? | |
3806 | bne hpfGVxlate ; Yup, do accelerated shadow stuff | |
3807 | |
3808 | ; | |
3809 | ; This is where we loop descending nested pmaps | |
3810 | ; | |
3811 | ||
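;
;			The descent loop below, sketched in C (illustrative only; the
;			types and the sxlk/mapSearch stubs are stand-ins for the real
;			primitives, and mappings with removal in progress are folded
;			into the not-found case):
;
/*
    #include <stdint.h>

    struct pmap;
    struct mapping {
        unsigned    type;           // mpNest / mpLinkage / terminal
        uint64_t    nest_reloc;     // vaddr bias into the nested space
        struct pmap *nested;        // stands in for the pmapTrans lookup
    };
    enum { MP_NEST = 1, MP_LINKAGE = 2 };

    extern int  sxlk_shared(struct pmap *pm);   // 0 on success
    extern void sxlk_unlock(struct pmap *pm);
    extern struct mapping *map_search(struct pmap *pm, uint64_t va);

    static struct mapping *fault_descend(struct pmap *pm, uint64_t va) {
        for (int nest = 64; nest > 0; nest--) {     // 64-level sanity limit
            if (sxlk_shared(pm) != 0)
                return 0;                           // hpfBadLock is fatal in the kernel
            struct mapping *mp = map_search(pm, va);
            if (mp == 0) { sxlk_unlock(pm); return 0; }   // hpfNotFound
            if (mp->type != MP_NEST && mp->type != MP_LINKAGE)
                return mp;                          // hpfFoundIt, search lock still held
            va += mp->nest_reloc;                   // relocate into the nested space
            struct pmap *next = mp->nested;
            sxlk_unlock(pm);                        // drop old level, then descend
            pm = next;
        }
        return 0;                                   // hpfNestTooMuch
    }
*/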
3812 | hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3813 | addi r20,r20,-1 ; Count nest try | |
3814 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
3815 | mr. r3,r3 ; Did we get the lock? | |
3816 | bne-- hpfBadLock ; Nope... | |
3817 | ||
3818 | mr r3,r28 ; Get the pmap pointer | |
3819 | mr r4,r22 ; Get top of faulting vaddr | |
3820 | mr r5,r23 ; Get bottom of faulting vaddr | |
3821 | bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags) | |
3822 | ||
3823 | rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one? | |
3824 | mr. r31,r3 ; Save the mapping if we found it | |
3825 | cmplwi cr1,r0,0 ; Check for removal | |
3826 | crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing | |
3827 | ||
3828 | bt-- cr0_eq,hpfNotFound ; Not found or removing... | |
3829 | |
3830 | rlwinm r0,r7,0,mpType ; Isolate mapping type | |
3831 | cmplwi r0,mpNest ; Are we again nested? | |
3832 | cmplwi cr1,r0,mpLinkage ; Are we a linkage type? | |
3833 | cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type? | |
3834 | mr r26,r7 ; Get the flags for this mapping (passed back from search call) |
3835 | ||
3836 | lhz r21,mpSpace(r31) ; Get the space | |
3837 | ||
3838 | bne++ hpfFoundIt ; No, we found our guy... | |
3839 | |
3840 | ||
3841 | #if pmapTransSize != 12 | |
3842 | #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize | |
3843 | #endif | |
3844 | cmplwi r0,mpLinkage ; Linkage mapping? | |
3845 | cmplwi cr1,r20,0 ; Too many nestings? | |
3846 | beq-- hpfSpclNest ; Do we need to do special handling? | |
3847 | |
3848 | hpfCSrch: lhz r21,mpSpace(r31) ; Get the space | |
3849 | lwz r8,mpNestReloc(r31) ; Get the vaddr relocation | |
3850 | lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half | |
3851 | la r3,pmapSXlk(r28) ; Point to the old pmap search lock | |
3852 | lis r0,0x8000 ; Get 0xFFFFFFFF80000000 | |
3853 | lis r10,hi16(EXT(pmapTrans)) ; Get the translate table | |
3854 | add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit | |
3855 | blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop... | |
3856 | or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit | |
3857 | slwi r11,r21,3 ; Multiply space by 8 | |
3858 | ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part | |
3859 | addc r23,r23,r9 ; Relocate bottom half of vaddr | |
3860 | lwz r10,0(r10) ; Get the actual translation map | |
3861 | slwi r12,r21,2 ; Multiply space by 4 | |
3862 | add r10,r10,r11 ; Add in the higher part of the index | |
3863 | rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit) | |
3864 | adde r22,r22,r8 ; Relocate the top half of the vaddr | |
3865 | add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry | |
3866 | bl sxlkUnlock ; Unlock the search list | |
3867 | ||
3868 | bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines | |
3869 | lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap | |
3870 | cmplwi r28,0 ; Is the pmap paddr valid? |
3871 | bne+ hpfNest ; Nest into new pmap... | |
3872 | b hpfBadPmap ; Handle bad pmap | |
3873 | ||
3874 | hpfGetPmap64: | |
3875 | ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap | |
3876 | cmpldi r28,0 ; Is the pmap paddr valid? |
3877 | bne++ hpfNest ; Nest into new pmap... | |
3878 | b hpfBadPmap ; Handle bad pmap | |
3879 | ||
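;
;			The space-to-pmap translation above avoids a multiply: with the
;			12-byte pmapTrans entries asserted earlier, the byte offset is
;			formed as (space << 3) + (space << 2).  A small C sketch (the
;			entry layout shown is an assumption; only the 12-byte stride
;			and the paddr at offset 0 are taken from the code):
;
/*
    #include <stdint.h>

    struct pmap_trans_entry {       // hypothetical 12-byte layout
        uint64_t pmapPAddr;         // physical address of the pmap
        uint32_t pmapVAddr;         // low half of its virtual address
    } __attribute__((packed));

    static uint64_t pmap_paddr_for_space(void *pmapTrans, uint32_t space) {
        uint32_t off = (space << 3) + (space << 2);     // space * 12, shift-and-add
        struct pmap_trans_entry *e =
            (struct pmap_trans_entry *)((char *)pmapTrans + off);
        return e->pmapPAddr;
    }
*/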
3880 | |
3881 | ; | |
3882 | ; Error condition. We only allow 64 nestings. This keeps us from having to | |
3883 | ; check for recursive nests when we install them. | |
3884 | ; | |
3885 | ||
3886 | .align 5 | |
3887 | ||
3888 | hpfNestTooMuch: | |
3889 | lwz r20,savedsisr(r13) ; Get the DSISR | |
3890 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3891 | bl sxlkUnlock ; Unlock the search list (R3 good from above) | |
3892 | ori r20,r20,1 ; Indicate that there was a nesting problem | |
3893 | stw r20,savedsisr(r13) ; Stash it | |
3894 | lwz r11,saveexception(r13) ; Restore the exception code | |
3895 | b EXT(PFSExit) ; Leave... | |
3896 | ||
3897 | ; | |
3898 | ; Error condition - lock failed - this is fatal | |
3899 | ; | |
3900 | ||
3901 | .align 5 | |
3902 | ||
3903 | hpfBadLock: | |
3904 | lis r0,hi16(Choke) ; System abend | |
3905 | ori r0,r0,lo16(Choke) ; System abend | |
3906 | li r3,failMapping ; Show mapping failure | |
3907 | sc | |
3908 | |
3909 | ; | |
3910 | ; Error condition - space id selected an invalid pmap - fatal | |
3911 | ; | |
3912 | ||
3913 | .align 5 | |
3914 | ||
3915 | hpfBadPmap: | |
3916 | lis r0,hi16(Choke) ; System abend | |
3917 | ori r0,r0,lo16(Choke) ; System abend | |
3918 | li r3,failPmap ; Show invalid pmap | |
3919 | sc | |
3920 | ||
3921 | ; |
3922 | ; Did not find any kind of mapping | |
3923 | ; | |
3924 | ||
3925 | .align 5 | |
3926 | ||
3927 | hpfNotFound: | |
3928 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
3929 | bl sxlkUnlock ; Unlock it | |
3930 | lwz r11,saveexception(r13) ; Restore the exception code | |
3931 | ||
3932 | hpfExit: ; We need this because we cannot do a relative branch | |
3933 | b EXT(PFSExit) ; Leave... | |
3934 | ||
3935 | ||
3936 | ; | |
3937 | ; Here is where we handle special mappings. So far, the only use is to load a | |
3938 | ; processor specific segment register for copy in/out handling. | |
3939 | ; | |
3940 | ; The only (so far implemented) special map is used for copyin/copyout. | |
3941 | ; We keep a mapping of a "linkage" mapping in the per_proc. | |
3942 | ; The linkage mapping is basically a nested pmap that is switched in | |
3943 | ; as part of context switch. It relocates the appropriate user address | |
3944 | ; space slice into the right place in the kernel. | |
3945 | ; | |
3946 | ||
3947 | .align 5 | |
3948 | ||
3949 | hpfSpclNest: | |
3950 | la r31,ppUMWmp(r19) ; Just point to the mapping |
3951 | oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here | |
3952 | b hpfCSrch ; Go continue search... |
3953 | ||
3954 | ||
3955 | ; | |
3956 | ; We have now found a mapping for the address we faulted on. | |
3957 | ; | |
3958 | ||
3959 | ; | |
3960 | ; Here we go about calculating what the VSID should be. We concatenate | |
3961 | ; the space ID (14 bits wide) 3 times. We then slide the vaddr over | |
3962 | ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID). | |
3963 | ; Then we XOR the expanded space ID and the shifted vaddr. This gives us | |
3964 | ; the VSID. | |
3965 | ; | |
3966 | ; This is used both for segment handling and PTE handling | |
3967 | ; | |
3968 | ||
3969 | ||
3970 | #if maxAdrSpb != 14 | |
3971 | #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!! | |
3972 | #endif | |
3973 | ||
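;
;			A compact C restatement of that recipe (illustrative; the final
;			trim for 65-bit addressing is omitted):
;
/*
    #include <stdint.h>

    static uint64_t make_vsid(uint64_t ea, uint32_t space) {
        // space: 14-bit address space ID
        uint64_t esid = ea >> 28;                           // top 36 bits of EA
        uint64_t sid  = (uint64_t)(space & 0x3FFFu);
        uint64_t sid3 = sid | (sid << 14) | (sid << 28);    // three copies
        return ((esid << 14) ^ sid3) & ((1ULL << 50) - 1);  // 50-bit VSID
    }
*/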
3974 | ; Important non-volatile registers at this point ('home' means the final pmap/mapping found |
3975 | ; when a multi-level mapping has been successfully searched): | |
3976 | ; r21: home space id number | |
3977 | ; r22: relocated high-order 32 bits of vaddr | |
3978 | ; r23: relocated low-order 32 bits of vaddr | |
3979 | ; r25: pmap physical address | |
3980 | ; r27: dsisr | |
3981 | ; r28: home pmap physical address | |
3982 | ; r29: high-order 32 bits of faulting vaddr | |
3983 | ; r30: low-order 32 bits of faulting vaddr | |
3984 | ; r31: mapping's physical address | |
3985 | ||
55e303ae A |
3986 | .align 5 |
3987 | ||
3988 | hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment | |
3989 | hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment? | |
3990 | rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID |
3991 | rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order | |
3992 | rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over | |
3993 | rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag | |
3994 | rlwimi r21,r21,14,4,17 ; Make a second copy of space above first |
3995 | cmplwi cr5,r0,0 ; Did we just do a special nesting? | |
3996 | rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35 | |
3997 | crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest | |
3998 | rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register | |
3999 | xor r14,r14,r20 ; Calculate the top half of VSID | |
4000 | xor r15,r15,r21 ; Calculate the bottom half of the VSID | |
4001 | rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing) | |
4002 | rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry | |
4003 | rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top | |
4004 | rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position | |
4005 | or r12,r12,r15 ; Add key into the bottom of VSID | |
4006 | ; | |
4007 | ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12 | |
4008 | ||
4009 | bne++ hpfPteMiss ; Nope, normal PTE miss... | |
4010 | ||
4011 | ; | |
4012 | ; Here is the only place that we make an entry in the pmap segment cache. | |
4013 | ; | |
4014 | ; Note that we do not make an entry in the segment cache for special | |
4015 | ; nested mappings. This makes the copy in/out segment get refreshed | |
4016 | ; when switching threads. | |
4017 | ; | |
4018 | ; The first thing that we do is to look up the ESID we are going to load | |
4019 | ; into a segment in the pmap cache. If it is already there, this is | |
4020 | ; a segment that appeared since the last time we switched address spaces. | |
4021 | ; If all is correct, then it was another processor that made the cache | |
4022 | ; entry. If not, well, it is an error that we should die on, but I have | |
4023 | ; not figured out a good way to trap it yet. | |
4024 | ; | |
4025 | ; If we get a hit, we just bail, otherwise, lock the pmap cache, select | |
4026 | ; an entry based on the generation number, update the cache entry, and | |
4027 | ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit | |
4028 | ; entries that correspond to the last 4 bits (32:35 for 64-bit and | |
4029 | ; 0:3 for 32-bit) of the ESID. | |
4030 | ; | |
4031 | ; Then we unlock and bail. | |
4032 | ; | |
4033 | ; First lock it. Then select a free slot or steal one based on the generation | |
4034 | ; number. Then store it, update the allocation flags, and unlock. | |
4035 | ; | |
4036 | ; The cache entry contains an image of the ESID/VSID pair we would load for | |
4037 | ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image. | |
4038 | ; | |
4039 | ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not | |
4040 | ; the current one, which may have changed because we nested. | |
4041 | ; | |
4042 | ; Also remember that we do not store the valid bit in the ESID. If we | |
4043 | ; do, this will break some other stuff. | |
4044 | ; | |
4045 | ||
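;
;			Roughly, the slot selection below in C (illustrative; the bit
;			positions and the 16-slot size are stand-ins, and the 4-bit
;			sub-tag update is left to the prose above):
;
/*
    #include <stdint.h>

    #define SEG_CACHE_SLOTS 16u         // stand-in for pmapSegCacheUse

    // Take the first free slot if the invalid-bit mask has one (count
    // leading zeros); otherwise steal, steering by the ever-incrementing
    // generation number, wrapped into the slots in use.
    static unsigned pick_seg_cache_slot(uint32_t invalid_mask, uint32_t *gen) {
        unsigned first = invalid_mask ? (unsigned)__builtin_clz(invalid_mask) : 32u;
        *gen += 1u;
        unsigned slot = (first < SEG_CACHE_SLOTS) ? first : *gen;
        return slot & (SEG_CACHE_SLOTS - 1u);   // wrap into range
    }
*/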
4046 | bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault.... | |
4047 | ||
4048 | mr r3,r25 ; Point to the pmap | |
4049 | mr r4,r29 ; ESID high half |
4050 | mr r5,r30 ; ESID low half | |
4051 | bl pmapCacheLookup ; Go see if this is in the cache already |
4052 | ||
4053 | mr. r3,r3 ; Did we find it? | |
4054 | mr r4,r11 ; Copy this to a different register | |
4055 | ||
4056 | bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry... | |
4057 | ||
4058 | lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table | |
4059 | lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table | |
4060 | ||
4061 | cntlzw r7,r4 ; Find a free slot | |
4062 | ||
4063 | subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one | |
4064 | rlwinm r30,r30,0,0,3 ; Clean up the ESID | |
4065 | srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not | |
4066 | addi r5,r4,1 ; Bump the generation number | |
4067 | and r7,r7,r6 ; Clear bit number if none empty | |
4068 | andc r8,r4,r6 ; Clear generation count if we found an empty | |
4069 | rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word | |
4070 | or r7,r7,r8 ; Select a slot number | |
4071 | li r8,0 ; Clear | |
4072 | andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using | |
4073 | oris r8,r8,0x8000 ; Get the high bit on | |
4074 | la r9,pmapSegCache(r25) ; Point to the segment cache | |
4075 | slwi r6,r7,4 ; Get index into the segment cache | |
4076 | slwi r2,r7,2 ; Get index into the segment cache sub-tag index | |
4077 | srw r8,r8,r7 ; Get the mask | |
4078 | cmplwi r2,32 ; See if we are in the first or second half of sub-tag | |
4079 | li r0,0 ; Clear | |
4080 | rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out | |
4081 | oris r0,r0,0xF000 ; Get the sub-tag mask | |
4082 | add r9,r9,r6 ; Point to the cache slot | |
4083 | srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift works for either half) | |
4084 | srw r5,r30,r2 ; Slide sub-tag to right slot (shift works for either half) | |
4085 | ||
4086 | stw r29,sgcESID(r9) ; Save the top of the ESID | |
4087 | andc r10,r10,r0 ; Clear sub-tag slot in case we are in top | |
4088 | andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom | |
4089 | stw r30,sgcESID+4(r9) ; Save the bottom of the ESID | |
4090 | or r10,r10,r5 ; Stick in subtag in case top half | |
4091 | or r11,r11,r5 ; Stick in subtag in case bottom half | |
4092 | stw r14,sgcVSID(r9) ; Save the top of the VSID | |
4093 | andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated | |
4094 | stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key | |
4095 | bge hpfSCSTbottom ; Go save the bottom part of sub-tag | |
4096 | ||
4097 | stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag | |
4098 | b hpfNoCacheEnt ; Go finish up... | |
4099 | ||
4100 | hpfSCSTbottom: | |
4101 | stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag | |
4102 | ||
4103 | ||
4104 | hpfNoCacheEnt: | |
4105 | eieio ; Make sure cache is updated before lock | |
4106 | stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number | |
4107 | ||
4108 | ||
4109 | hpfNoCacheEnt2: | |
4110 | lwz r4,ppMapFlags(r19) ; Get the protection key modifier | |
4111 | bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment... | |
4112 | ||
4113 | ; | |
4114 | ; Make and enter 32-bit segment register | |
4115 | ; | |
4116 | ||
4117 | lwz r16,validSegs(r19) ; Get the valid SR flags | |
4118 | xor r12,r12,r4 ; Alter the storage key before loading segment register | |
4119 | rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting | |
4120 | rlwinm r6,r12,19,1,3 ; Insert the keys and N bit | |
4121 | lis r0,0x8000 ; Set bit 0 | |
4122 | rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID | |
4123 | srw r0,r0,r2 ; Get bit corresponding to SR | |
4124 | rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents | |
4125 | or r16,r16,r0 ; Show that SR is valid | |
4126 | ||
4127 | mtsrin r6,r30 ; Set the actual SR | |
4128 | ||
4129 | stw r16,validSegs(r19) ; Set the valid SR flags | |
4130 | ||
4131 | b hpfPteMiss ; SR loaded, go do a PTE... | |
4132 | ||
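;
;			The SR image built above, restated in C (illustrative; bit
;			numbering is big-endian as in the comments, T is left zero):
;
/*
    #include <stdint.h>

    static uint32_t make_sr_image(uint32_t vsid24, uint32_t ks, uint32_t kp,
                                  uint32_t n) {
        uint32_t sr = vsid24 & 0x00FFFFFFu;     // 24-bit VSID in BE bits 8:31
        sr |= (ks & 1u) << 30;                  // Ks supervisor key, BE bit 1
        sr |= (kp & 1u) << 29;                  // Kp user key, BE bit 2
        sr |= (n  & 1u) << 28;                  // N no-execute, BE bit 3
        return sr;                              // handed to mtsrin; the SR number
    }                                           // comes from the EA's top nybble
*/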
4133 | ; | |
4134 | ; Make and enter 64-bit segment look-aside buffer entry. | |
4135 | ; Note that the cache entry is the right format except for valid bit. | |
4136 | ; We also need to convert from long long to 64-bit register values. | |
4137 | ; | |
4138 | ||
4139 | ||
4140 | .align 5 | |
4141 | ||
4142 | hpfLoadSeg64: | |
4143 | ld r16,validSegs(r19) ; Get the valid SLB entry flags | |
4144 | sldi r8,r29,32 ; Move high order address over | |
4145 | sldi r10,r14,32 ; Move high part of VSID over | |
4146 | ||
4147 | not r3,r16 ; Make valids be 0s | |
4148 | li r0,1 ; Prepare to set bit 0 | |
4149 | ||
4150 | cntlzd r17,r3 ; Find a free SLB | |
4151 | xor r12,r12,r4 ; Alter the storage key before loading segment table entry | |
4152 | or r9,r8,r30 ; Form full 64-bit address | |
4153 | cmplwi r17,63 ; Did we find a free SLB entry? | |
4154 | sldi r0,r0,63 ; Get bit 0 set | |
4155 | or r10,r10,r12 ; Move in low part and keys | |
4156 | addi r17,r17,1 ; Skip SLB 0 always | |
4157 | blt++ hpfFreeSeg ; Yes, go load it... | |
4158 | ||
4159 | ; | |
4160 | ; No free SLB entries, select one that is in use and invalidate it | |
4161 | ; | |
4162 | lwz r4,ppSegSteal(r19) ; Get the next slot to steal | |
4163 | addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only | |
4164 | addi r4,r4,1 ; Set next slot to steal | |
4165 | slbmfee r7,r17 ; Get the entry that is in the selected spot | |
4166 | subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap | |
4167 | rldicr r7,r7,0,35 ; Clear the valid bit and the rest | |
4168 | srawi r2,r2,31 ; Get -1 if steal index still in range | |
4169 | slbie r7 ; Invalidate the in-use SLB entry | |
4170 | and r4,r4,r2 ; Reset steal index when it should wrap | |
4171 | isync ; | |
4172 | ||
4173 | stw r4,ppSegSteal(r19) ; Set the next slot to steal | |
4174 | ; | |
4175 | ; We are now ready to stick the SLB entry in the SLB and mark it in use | |
4176 | ; | |
4177 | ||
4178 | hpfFreeSeg: | |
4179 | subi r4,r17,1 ; Adjust shift to account for skipping slb 0 | |
4180 | mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear | |
4181 | srd r0,r0,r4 ; Set bit mask for allocation | |
4182 | oris r9,r9,0x0800 ; Turn on the valid bit | |
4183 | or r16,r16,r0 ; Turn on the allocation flag | |
4184 | rldimi r9,r17,0,58 ; Copy in the SLB entry selector | |
4185 | ||
4186 | beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest... | |
4187 | slbie r7 ; Blow away a potential duplicate | |
4188 | ||
4189 | hpfNoBlow: slbmte r10,r9 ; Make that SLB entry | |
4190 | ||
4191 | std r16,validSegs(r19) ; Mark as valid | |
4192 | b hpfPteMiss ; STE loaded, go do a PTE... | |
4193 | ||
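;
;			The SLB slot hunt above, as a C sketch (illustrative; the
;			16-slot segment cache size is a stand-in, and slot 0 is
;			reserved just as in the code):
;
/*
    #include <stdint.h>

    #define SEG_CACHE_SLOTS 16u

    static unsigned pick_slb_slot(uint64_t valid_mask, unsigned *steal_rr) {
        uint64_t free_mask = ~valid_mask;
        unsigned first = free_mask ? (unsigned)__builtin_clzll(free_mask) : 64u;
        if (first < 63u)
            return first + 1u;          // free entry found; always skip SLB 0
        // Nothing free: steal round-robin from the non-cached slots only.
        unsigned victim = *steal_rr + SEG_CACHE_SLOTS + 1u;
        *steal_rr = (*steal_rr + 1u) % (63u - SEG_CACHE_SLOTS);
        return victim;                  // caller does slbie on the old entry
    }
*/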
4194 | ; | |
4195 | ; The segment has been set up and loaded if need be. Now we are ready to build the | |
4196 | ; PTE and get it into the hash table. | |
4197 | ; | |
4198 | ; Note that there is actually a race here. If we start fault processing on | |
4199 | ; a different pmap, i.e., we have descended into a nested pmap, it is possible | |
4200 | ; that the nest could have been removed from the original pmap. We would | |
4201 | ; succeed with this translation anyway. I do not think we need to worry | |
4202 | ; about this (famous last words) because nobody should be unnesting anything | |
4203 | ; if there are still people actively using them. It should be up to the | |
4204 | ; higher level VM system to put the kibosh on this. | |
4205 | ; | |
4206 | ; There is also another race here: if we fault on the same mapping on more than | |
4207 | ; one processor at the same time, we could end up with multiple PTEs for the same | |
4208 | ; mapping. This is not a good thing.... We really only need one of the | |
4209 | ; fault handlers to finish, so what we do is to set a "fault in progress" flag in | |
4210 | ; the mapping. If we see that set, we just abandon the handler and hope that by | |
4211 | ; the time we restore context and restart the interrupted code, the fault has | |
4212 | ; been resolved by the other guy. If not, we will take another fault. | |
4213 | ; | |
4214 | ||
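;
;			The claim step below in C (illustrative; the flag values are
;			stand-ins for mpFIP and mpHValid, not the kernel's):
;
/*
    #include <stdint.h>
    #include <stdatomic.h>

    #define MP_FIP      0x00000100u     // stand-in bit for mpFIP
    #define MP_HVALID   0x00000001u     // stand-in bit for mpHValid

    // Returns 1 when this CPU owns the fault; 0 means abandon
    // (someone else is on it, or a PTE already exists).
    static int claim_fault(_Atomic uint32_t *flags, uint32_t mp_pte) {
        uint32_t old = atomic_load(flags);
        do {
            if ((old & MP_FIP) || (mp_pte & MP_HVALID))
                return 0;               // hpfAbandon / hpfIgnore territory
        } while (!atomic_compare_exchange_weak(flags, &old, old | MP_FIP));
        return 1;                       // we set FIP; go build the PTE
    }
*/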
4215 | ; | |
4216 | ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not. | |
4217 | ; It is required to stay there until after we call mapSelSlot!!!! | |
4218 | ; | |
4219 | ||
4220 | .align 5 | |
4221 | ||
4222 | hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field | |
4223 | lwz r12,mpPte(r31) ; Get the quick pointer to PTE | |
4224 | li r3,mpHValid ; Get the PTE valid bit | |
4225 | andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side? | |
4226 | ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag | |
4227 | crnot cr1_eq,cr0_eq ; Remember if FIP was on | |
4228 | and. r12,r12,r3 ; Isolate the valid bit | |
4229 | crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail... | |
4230 | beq-- hpfAbandon ; Yes, other processor is or already has handled this... | |
91447636 A |
4231 | rlwinm r0,r2,0,mpType ; Isolate mapping type |
4232 | cmplwi r0,mpBlock ; Is this a block mapping? | |
4233 | crnot cr7_eq,cr0_eq ; Remember if we have a block mapping | |
55e303ae A |
4234 | stwcx. r2,0,r31 ; Store the flags |
4235 | bne-- hpfPteMiss ; Collision, try again... | |
4236 | ||
4237 | bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff... | |
4238 | ||
4239 | ; | |
4240 | ; At this point we are about to do the 32-bit PTE generation. | |
4241 | ; | |
4242 | ; The following is the R14:R15 pair that contains the "shifted" VSID: | |
4243 | ; | |
4244 | ; 1 2 3 4 4 5 6 | |
4245 | ; 0 8 6 4 2 0 8 6 3 | |
4246 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4247 | ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| | |
4248 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4249 | ; | |
4250 | ; The 24 bits of the 32-bit architecture VSID is in the following: | |
4251 | ; | |
4252 | ; 1 2 3 4 4 5 6 | |
4253 | ; 0 8 6 4 2 0 8 6 3 | |
4254 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4255 | ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| | |
4256 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4257 | ; | |
4258 | ||
4259 | ||
4260 | hpfBldPTE32: | |
4261 | lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion) | |
4262 | lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping | |
4263 | ||
4264 | mfsdr1 r27 ; Get the hash table base address | |
4265 | ||
4266 | rlwinm r0,r23,0,4,19 ; Isolate just the page index | |
4267 | rlwinm r18,r23,10,26,31 ; Extract the API | |
4268 | xor r19,r15,r0 ; Calculate hash << 12 | |
4269 | mr r2,r25 ; Save the flag part of the mapping | |
4270 | rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image | |
4271 | rlwinm r16,r27,16,7,15 ; Extract the hash table size | |
4272 | rlwinm r25,r25,0,0,19 ; Clear out the flags | |
4273 | slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported) | |
4274 | sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map) | |
4275 | ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask | |
4276 | rlwinm r27,r27,0,0,15 ; Extract the hash table base | |
4277 | rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table | |
4278 | add r24,r24,r25 ; Adjust to true physical address | |
4279 | rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image | |
4280 | rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot | |
4281 | and r19,r19,r16 ; Wrap hash table offset into the hash table | |
4282 | ori r24,r24,lo16(mpR) ; Turn on the reference bit right now | |
4283 | rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA | |
4284 | add r19,r19,r27 ; Point to the PTEG | |
4285 | subfic r20,r20,-4 ; Get negative offset to PCA | |
4286 | oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on | |
4287 | add r20,r20,r27 ; Point to the PCA slot | |
4288 | ||
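;
;			The addressing just computed, modeled in C (a rough sketch:
;			the real SDR1 mask gates the upper hash bits and the exact
;			VSID slice differs; only the shifts and the downward-growing
;			PCA are taken from the code):
;
/*
    #include <stdint.h>

    static void pteg_and_pca(uint32_t htab_base, uint32_t size_mask,
                             uint32_t vsid, uint32_t ea,
                             uint32_t *pteg, uint32_t *pca) {
        uint32_t hash = vsid ^ ((ea >> 12) & 0xFFFFu);  // primary hash
        uint32_t off  = (hash << 6) & size_mask;        // wrap into the table
        *pteg = htab_base + off;                        // 64-byte PTEGs
        *pca  = htab_base - 4u * ((off >> 6) + 1u);     // PCA below the base
    }
*/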
4289 | ; | |
4290 | ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower. | |
4291 | ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA. | |
4292 | ; | |
4293 | ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible | |
4294 | ; that some other processor beat us and stuck in a PTE or that | |
4295 | ; all we had was a simple segment exception and the PTE was there the whole time. | |
4296 | ; If we find a valid pointer, we are done. | |
4297 | ; | |
4298 | ||
4299 | mr r7,r20 ; Copy the PCA pointer | |
4300 | bl mapLockPteg ; Lock the PTEG | |
4301 | ||
4302 | lwz r12,mpPte(r31) ; Get the offset to the PTE | |
4303 | mr r17,r6 ; Remember the PCA image | |
4304 | mr r16,r6 ; Prime the post-select PCA image | |
4305 | andi. r0,r12,mpHValid ; Is there a PTE here already? | |
4306 | li r21,8 ; Get the number of slots | |
4307 | ||
4308 | bne- cr7,hpfNoPte32 ; Skip this for a block mapping... | |
4309 | ||
4310 | bne- hpfBailOut ; Someone already did this for us... | |
4311 | ||
4312 | ; | |
4313 | ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a | |
4314 | ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was |
4315 | ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. | |
4316 | ; R4 returns the slot index. | |
4317 | ; | |
4318 | ; REMEMBER: CR7 indicates that we are building a block mapping. | |
4319 | ; | |
4320 | ||
4321 | hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots | |
4322 | mr r6,r17 ; Get back the original PCA | |
4323 | rlwimi r6,r16,0,8,15 ; Insert the updated steal slot | |
4324 | blt- hpfBailOut ; Holy Cow, all slots are locked... | |
4325 | ||
4326 | bl mapSelSlot ; Go select a slot (note that the PCA image is already set up) | |
4327 | ||
4328 | cmplwi cr5,r3,1 ; Did we steal a slot? |
4329 | rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address | |
4330 | mr r16,r6 ; Remember the PCA image after selection |
4331 | blt+ cr5,hpfInser32 ; Nope, no steal... | |
4332 | ||
4333 | lwz r6,0(r19) ; Get the old PTE | |
4334 | lwz r7,4(r19) ; Get the real part of the stealee | |
4335 | rlwinm r6,r6,0,1,31 ; Clear the valid bit | |
4336 | bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping... | |
4337 | srwi r3,r7,12 ; Change phys address to a ppnum | |
4338 | bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page) | |
4339 | cmplwi cr1,r3,0 ; Check if this is in RAM | |
4340 | bne- hpfNoPte32 ; Could not get it, try for another... | |
4341 | ||
4342 | crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map | |
4343 | ||
4344 | hpfNipBM: stw r6,0(r19) ; Set the invalid PTE | |
4345 | ||
4346 | sync ; Make sure the invalid is stored | |
4347 | li r9,tlbieLock ; Get the TLBIE lock | |
4348 | rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part | |
4349 | ||
4350 | hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock | |
4351 | mfsprg r4,0 ; Get the per_proc | |
4352 | rlwinm r8,r6,25,18,31 ; Extract the space ID | |
4353 | rlwinm r11,r6,25,18,31 ; Extract the space ID | |
4354 | lwz r7,hwSteals(r4) ; Get the steal count | |
4355 | srwi r2,r6,7 ; Align segment number with hash | |
4356 | rlwimi r11,r11,14,4,17 ; Get copy above ourselves | |
4357 | mr. r0,r0 ; Is it locked? | |
4358 | srwi r0,r19,6 ; Align PTEG offset for back hash | |
4359 | xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits) | |
4360 | xor r11,r11,r0 ; Hash backwards to partial vaddr | |
4361 | rlwinm r12,r2,14,0,3 ; Shift segment up | |
4362 | mfsprg r2,2 ; Get feature flags | |
4363 | li r0,1 ; Get our lock word | |
4364 | rlwimi r12,r6,22,4,9 ; Move up the API | |
4365 | bne- hpfTLBIE32 ; It is locked, go wait... | |
4366 | rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr | |
4367 | ||
4368 | stwcx. r0,0,r9 ; Try to get it | |
4369 | bne- hpfTLBIE32 ; We was beat... | |
4370 | addi r7,r7,1 ; Bump the steal count | |
4371 | ||
4372 | rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box? | |
4373 | li r0,0 ; Lock clear value | |
4374 | ||
4375 | tlbie r12 ; Invalidate it everywhere | |
4376 | ||
4377 | ||
4378 | beq- hpfNoTS32 ; Cannot have MP on this machine... | |
4379 | ||
4380 | eieio ; Make sure that the tlbie happens first | |
4381 | tlbsync ; Wait for everyone to catch up | |
4382 | sync ; Make sure of it all | |
4383 | |
4384 | hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock | |
4385 | |
4386 | stw r7,hwSteals(r4) ; Save the steal count | |
4387 | bgt cr5,hpfInser32 ; We just stole a block mapping... |
4388 | ||
4389 | lwz r4,4(r19) ; Get the RC of the just invalidated PTE | |
4390 | ||
4391 | la r11,ppLink+4(r3) ; Point to the master RC copy | |
4392 | lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping | |
4393 | rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC | |
4394 | ||
4395 | hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC | |
4396 | or r0,r0,r2 ; Merge in the new RC | |
4397 | stwcx. r0,0,r11 ; Try to stick it back | |
4398 | bne- hpfMrgRC32 ; Try again if we collided... | |
4399 | ||
4400 | ||
4401 | hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address | |
4402 | beq- hpfLostPhys ; We could not find our mapping. Kick the bucket... |
4403 | ||
4404 | lhz r10,mpSpace(r7) ; Get the space | |
4405 | lwz r9,mpVAddr+4(r7) ; And the vaddr | |
4406 | cmplw cr1,r10,r8 ; Is this one of ours? | |
4407 | xor r9,r12,r9 ; Compare virtual address | |
4408 | cmplwi r9,0x1000 ; See if we really match | |
4409 | crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match | |
4410 | beq+ hpfFPnch2 ; Yes, found ours... | |
4411 | ||
4412 | lwz r7,mpAlias+4(r7) ; Chain on to the next | |
4413 | b hpfFPnch ; Check it out... | |
4414 | ||
4415 | hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG | |
4416 | stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG) | |
4417 | bl mapPhysUnlock ; Unlock the physent now | |
4418 | ||
4419 | hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on | |
4420 | ||
4421 | stw r24,4(r19) ; Stuff in the real part of the PTE | |
4422 | eieio ; Make sure this gets there first | |
4423 | ||
4424 | stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid | |
4425 | mr r17,r16 ; Get the PCA image to save | |
4426 | b hpfFinish ; Go join the common exit code... | |
4427 | ||
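;
;			The steal path above, condensed into C (illustrative; tlbie and
;			its fences are privileged instructions, so a stub stands in):
;
/*
    #include <stdint.h>
    #include <stdatomic.h>

    static _Atomic uint32_t tlbie_lock;                 // models tlbieLock

    static void tlbie_stub(uint32_t va) { (void)va; }   // tlbie; eieio; tlbsync; sync

    static void steal_invalidate(uint32_t va, uint32_t stolen_rc,
                                 _Atomic uint32_t *master_rc) {
        uint32_t expect = 0;
        while (!atomic_compare_exchange_weak(&tlbie_lock, &expect, 1u))
            expect = 0;                         // one tlbie on the bus at a time
        tlbie_stub(va);
        atomic_store(&tlbie_lock, 0u);          // drop the global lock
        uint32_t old = atomic_load(master_rc);  // fold R and C of the victim
        while (!atomic_compare_exchange_weak(master_rc, &old, old | stolen_rc))
            ;                                   // into the physent's master copy
    }
*/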
4428 | ||
4429 | ; | |
4430 | ; At this point we are about to do the 64-bit PTE generation. | |
4431 | ; | |
4432 | ; The following is the R14:R15 pair that contains the "shifted" VSID: | |
4433 | ; | |
4434 | ; 1 2 3 4 4 5 6 | |
4435 | ; 0 8 6 4 2 0 8 6 3 | |
4436 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4437 | ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////| | |
4438 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
4439 | ; | |
4440 | ; | |
4441 | ||
4442 | .align 5 | |
4443 | ||
4444 | hpfBldPTE64: | |
4445 | ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping | |
4446 | lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping | |
4447 | ||
4448 | mfsdr1 r27 ; Get the hash table base address | |
4449 | ||
4450 | sldi r11,r22,32 ; Slide top of adjusted EA over | |
4451 | sldi r14,r14,32 ; Slide top of VSID over | |
4452 | rlwinm r5,r27,0,27,31 ; Isolate the size | |
4453 | eqv r16,r16,r16 ; Get all foxes here | |
4454 | rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN | |
4455 | mr r2,r10 ; Save the flag part of the mapping | |
4456 | or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value | |
4457 | rldicr r27,r27,0,45 ; Clean up the hash table base | |
4458 | or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value | |
4459 | rlwinm r0,r11,0,4,19 ; Clear out everything but the page | |
4460 | subfic r5,r5,46 ; Get number of leading zeros | |
4461 | xor r19,r0,r15 ; Calculate hash | |
4462 | ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE | |
4463 | srd r16,r16,r5 ; Shift over to get length of table | |
4464 | srdi r19,r19,5 ; Convert page offset to hash table offset | |
4465 | rldicr r16,r16,0,56 ; Clean up lower bits in hash table size | |
4466 | rldicr r10,r10,0,51 ; Clear out flags | |
4467 | sldi r24,r24,12 ; Change ppnum to physical address | |
4468 | sub r11,r11,r10 ; Get the offset from the base mapping | |
4469 | and r19,r19,r16 ; Wrap into hash table | |
4470 | add r24,r24,r11 ; Get actual physical address of this page | |
4471 | srdi r20,r19,5 ; Convert PTEG offset to PCA offset | |
4472 | rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc. | |
4473 | subfic r20,r20,-4 ; Get negative offset to PCA | |
4474 | ori r24,r24,lo16(mpR) ; Force on the reference bit | |
4475 | add r20,r20,r27 ; Point to the PCA slot | |
4476 | add r19,r19,r27 ; Point to the PTEG | |
4477 | ||
4478 | ; | |
4479 | ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower. | |
4480 | ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA. | |
4481 | ; | |
4482 | ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible | |
4483 | ; that some other processor beat us and stuck in a PTE or that | |
4484 | ; all we had was a simple segment exception and the PTE was there the whole time. | |
4485 | ; If we find a valid pointer, we are done. | |
4486 | ; | |
4487 | ||
4488 | mr r7,r20 ; Copy the PCA pointer | |
4489 | bl mapLockPteg ; Lock the PTEG | |
4490 | ||
4491 | lwz r12,mpPte(r31) ; Get the offset to the PTE | |
4492 | mr r17,r6 ; Remember the PCA image | |
4493 | mr r18,r6 ; Prime post-selection PCA image | |
4494 | andi. r0,r12,mpHValid ; See if we have a PTE now | |
4495 | li r21,8 ; Get the number of slots | |
4496 | ||
4497 | bne-- cr7,hpfNoPte64 ; Skip this for a block mapping... | |
4498 | ||
4499 | bne-- hpfBailOut ; Someone already did this for us... | |
4500 | ||
4501 | ; | |
4502 | ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a | |
4503 | ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was | |
4504 | ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE. | |
4505 | ; R4 returns the slot index. | |
4506 | ; | |
4507 | ; REMEMBER: CR7 indicates that we are building a block mapping. | |
4508 | ; | |
4509 | ||
4510 | hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots | |
4511 | mr r6,r17 ; Restore original state of PCA | |
4512 | rlwimi r6,r18,0,8,15 ; Insert the updated steal slot | |
4513 | blt- hpfBailOut ; Holy Cow, all slots are locked... | |
4514 | ||
4515 | bl mapSelSlot ; Go select a slot | |
4516 | ||
4517 | cmplwi cr5,r3,1 ; Did we steal a slot? | |
4518 | mr r18,r6 ; Remember the PCA image after selection | |
4519 | insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address | |
4520 | lwz r10,hwSteals(r2) ; Get the steal count |
4521 | blt++ cr5,hpfInser64 ; Nope, no steal... | |
4522 | ||
4523 | ld r6,0(r19) ; Get the old PTE | |
4524 | ld r7,8(r19) ; Get the real part of the stealee | |
4525 | rldicr r6,r6,0,62 ; Clear the valid bit | |
4526 | bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping... | |
4527 | srdi r3,r7,12 ; Change phys address to a ppnum | |
4528 | bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page) | |
4529 | cmplwi cr1,r3,0 ; Check if this is in RAM | |
4530 | bne-- hpfNoPte64 ; Could not get it, try for another... | |
4531 | ||
4532 | crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map | |
4533 | ||
4534 | hpfNipBMx: std r6,0(r19) ; Set the invalid PTE | |
4535 | li r9,tlbieLock ; Get the TLBIE lock | |
4536 | ||
4537 | srdi r11,r6,5 ; Shift VSID over for back hash | |
4538 | mfsprg r4,0 ; Get the per_proc | |
4539 | xor r11,r11,r19 ; Hash backwards to get low bits of VPN | |
4540 | sync ; Make sure the invalid is stored | |
4541 | ||
4542 | sldi r12,r6,16 ; Move AVPN to EA position | |
4543 | sldi r11,r11,5 ; Move this to the page position | |
4544 | ||
4545 | hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock | |
4546 | mr. r0,r0 ; Is it locked? | |
4547 | li r0,1 ; Get our lock word | |
4548 | bne-- hpfTLBIE65 ; It is locked, go wait... | |
4549 | ||
4550 | stwcx. r0,0,r9 ; Try to get it | |
4551 | rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN | |
4552 | rldicl r8,r6,52,50 ; Isolate the address space ID | |
4553 | bne-- hpfTLBIE64 ; We was beat... | |
4554 | addi r10,r10,1 ; Bump the steal count | |
4555 | ||
4556 | rldicl r11,r12,0,16 ; Clear because the book says so | |
4557 | li r0,0 ; Lock clear value | |
4558 | ||
4559 | tlbie r11 ; Invalidate it everywhere | |
4560 | ||
4561 | mr r7,r8 ; Get a copy of the space ID |
4562 | eieio ; Make sure that the tlbie happens first | |
4563 | rldimi r7,r7,14,36 ; Copy address space to make hash value | |
4564 | tlbsync ; Wait for everyone to catch up | |
4565 | rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top | |
4566 | srdi r2,r6,26 ; Shift original segment down to bottom |
4567 | ||
4568 | ptesync ; Make sure of it all | |
4569 | xor r7,r7,r2 ; Compute original segment | |
4570 | stw r0,tlbieLock(0) ; Clear the tlbie lock | |
4571 | |
4572 | stw r10,hwSteals(r4) ; Save the steal count | |
4573 | bgt cr5,hpfInser64 ; We just stole a block mapping... | |
4574 | ||
4575 | rldimi r12,r7,28,0 ; Insert decoded segment | |
4576 | rldicl r4,r12,0,13 ; Trim to max supported address | |
4577 | ||
4578 | ld r12,8(r19) ; Get the RC of the just invalidated PTE | |
4579 | ||
4580 | la r11,ppLink+4(r3) ; Point to the master RC copy | |
4581 | ld r7,ppLink(r3) ; Grab the pointer to the first mapping | |
4582 | rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC | |
4583 | ||
4584 | hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC | |
4585 | li r12,ppLFAmask ; Get mask to clean up alias pointer | |
4586 | or r0,r0,r2 ; Merge in the new RC | |
4587 | rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
4588 | stwcx. r0,0,r11 ; Try to stick it back |
4589 | bne-- hpfMrgRC64 ; Try again if we collided... | |
4590 | ||
4591 | hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address | |
4592 | beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket... | |
4593 | ||
4594 | lhz r10,mpSpace(r7) ; Get the space | |
4595 | ld r9,mpVAddr(r7) ; And the vaddr | |
4596 | cmplw cr1,r10,r8 ; Is this one of ours? | |
4597 | xor r9,r4,r9 ; Compare virtual address | |
4598 | cmpldi r9,0x1000 ; See if we really match | |
4599 | crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match | |
4600 | beq++ hpfFPnch2x ; Yes, found ours... | |
4601 | ||
4602 | ld r7,mpAlias(r7) ; Chain on to the next | |
4603 | b hpfFPnchx ; Check it out... | |
4604 | ||
4605 | .align 5 | |
4606 | ||
4607 | hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area | |
4608 | stwcx. r7,0,r7 ; Kill reservation | |
4609 | ||
4610 | hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock | |
4611 | mr. r0,r0 ; Is it locked? | |
4612 | beq++ hpfTLBIE64 ; It is free, go try to grab it... | |
4613 | b hpfTLBIE63 ; Still locked, wait some more... | |
4614 | ||
4615 | ||
4616 | ||
4617 | hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG | |
4618 | stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though) | |
4619 | bl mapPhysUnlock ; Unlock the physent now | |
4620 | ||
4621 | ||
4622 | hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE | |
4623 | eieio ; Make sure this gets there first | |
4624 | std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid | |
4625 | mr r17,r18 ; Get the PCA image to set | |
4626 | b hpfFinish ; Go join the common exit code... | |
4627 | ||
4628 | hpfLostPhys: | |
4629 | lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead | |
4630 | ori r0,r0,lo16(Choke) ; System abend | |
4631 | sc | |
4632 | ||
4633 | ; | |
4634 | ; This is the common code we execute when we are finished setting up the PTE. | |
4635 | ; | |
4636 | ||
4637 | .align 5 | |
4638 | ||
4639 | hpfFinish: sub r4,r19,r27 ; Get offset of PTE | |
4640 | ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset | |
4641 | bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map | |
4642 | stw r4,mpPte(r31) ; Remember our PTE | |
4643 | ||
4644 | hpfBailOut: eieio ; Make sure all updates come first | |
4645 | stw r17,0(r20) ; Unlock and set the final PCA | |
4646 | ||
4647 | ; | |
4648 | ; This is where we go if we have started processing the fault, but find that someone | |
4649 | ; else has taken care of it. | |
4650 | ; | |
4651 | ||
4652 | hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags | |
4653 | rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag | |
4654 | sth r2,mpFlags+2(r31) ; Set it | |
4655 | ||
4656 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
4657 | bl sxlkUnlock ; Unlock the search list | |
4658 | ||
4659 | li r11,T_IN_VAIN ; Say that it was handled | |
4660 | b EXT(PFSExit) ; Leave... | |
4661 | ||
4662 | ; | |
4663 | ; This is where we go when we find that someone else | |
4664 | ; is in the process of handling the fault. | |
4665 | ; | |
4666 | ||
4667 | hpfAbandon: li r3,lgKillResv ; Kill off any reservation | |
4668 | stwcx. r3,0,r3 ; Do it | |
4669 | ||
4670 | la r3,pmapSXlk(r28) ; Point to the pmap search lock | |
4671 | bl sxlkUnlock ; Unlock the search list | |
4672 | ||
4673 | li r11,T_IN_VAIN ; Say that it was handled | |
4674 | b EXT(PFSExit) ; Leave... | |
4675 | ||
4676 | ; |
4677 | ; Guest shadow assist -- page fault handler | |
4678 | ; | |
4679 | ; Here we handle a fault in a guest pmap that has the guest shadow mapping | |
4680 | ; assist active. We locate the VMM pmap extension block, which contains an | |
4681 | ; index over the discontiguous multi-page shadow hash table. The index | |
4682 | ; corresponding to our vaddr is selected, and the selected group within | |
4683 | ; that page is searched for a valid and active entry that contains | |
4684 | ; our vaddr and space id. The search is pipelined, so that we may fetch | |
4685 | ; the next slot while examining the current slot for a hit. The final | |
4686 | ; search iteration is unrolled so that we don't fetch beyond the end of | |
4687 | ; our group, which could have dire consequences depending upon where the | |
4688 | ; physical hash page is located. | |
4689 | ; | |
4690 | ; The VMM pmap extension block occupies a page. Beginning at offset 0, we | |
4691 | ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary | |
4692 | ; after the pmap_vmm_ext is the hash table physical address index, a | |
4693 | ; linear list of 64-bit physical addresses of the pages that comprise | |
4694 | ; the hash table. | |
4695 | ; | |
4696 | ; In the event that we successfully locate a guest mapping, we re-join | |
4697 | ; the page fault path at hpfGVfound with the mapping's address in r31; | |
4698 | ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding | |
4699 | ; a share of the pmap search lock for the host pmap with the host pmap's | |
4700 | ; address in r28, the guest pmap's space id in r21, and the guest pmap's | |
4701 | ; flags in r12. | |
4702 | ; | |
4703 | ||
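;
;			A C model of the group search (illustrative; the slot layout,
;			flag values, and group size are stand-ins, and the hash-page
;			indexing is summarized in the prose above):
;
/*
    #include <stdint.h>

    #define GV_SLOTS        8u      // slots per hash group, per the loop below
    #define MPG_FREE_DORM   0x3u    // stand-in for mpgFree|mpgDormant

    struct gv_slot { uint32_t flags; uint16_t space; uint64_t vaddr; };

    static struct gv_slot *gv_lookup(struct gv_slot *group,
                                     uint16_t space, uint64_t va) {
        uint64_t page = va & ~0xFFFull;         // page-align the guest vaddr
        for (unsigned i = 0; i < GV_SLOTS; i++) {
            struct gv_slot *s = &group[i];
            if (!(s->flags & MPG_FREE_DORM) && s->space == space
                && (s->vaddr & ~0xFFFull) == page)
                return s;                       // hpfGVfound
        }
        return 0;                               // hpfGVmiss
    }
*/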
4704 | .align 5 | |
4705 | hpfGVxlate: | |
4706 | bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine | |
4707 | ||
4708 | lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr | |
4709 | lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags | |
4710 | lwz r21,pmapSpace(r28) ; r21 <- guest space ID number | |
4711 | lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr | |
4712 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
4713 | rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr | |
4714 | lwz r6,vxsGpf(r11) ; Get guest fault count | |
4715 | ||
4716 | srwi r3,r10,12 ; Form shadow hash: | |
4717 | xor r3,r3,r21 ; spaceID ^ (vaddr >> 12) | |
4718 | rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
4719 | ; Form index offset from hash page number | |
4720 | add r31,r31,r4 ; r31 <- hash page index entry | |
4721 | lwz r31,4(r31) ; r31 <- hash page paddr | |
4722 | rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK | |
4723 | ; r31 <- hash group paddr | |
4724 | ||
4725 | la r3,pmapSXlk(r28) ; Point to the host pmap's search lock | |
4726 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
4727 | mr. r3,r3 ; Did we get the lock? | |
4728 | bne- hpfBadLock ; Nope... | |
4729 | ||
4730 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
4731 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
4732 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
4733 | addi r6,r6,1 ; Increment guest fault count | |
4734 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
4735 | mtctr r0 ; in this group | |
4736 | stw r6,vxsGpf(r11) ; Update guest fault count | |
4737 | b hpfGVlp32 | |
4738 | ||
4739 | .align 5 | |
4740 | hpfGVlp32: | |
4741 | mr r6,r3 ; r6 <- current mapping slot's flags | |
4742 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
4743 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
4744 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
4745 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
4746 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
4747 | andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
4748 | xor r7,r7,r21 ; Compare space ID | |
4749 | or r0,r6,r7 ; r0 <- !(!free && !dormant && space match) | |
4750 | xor r8,r8,r10 ; Compare virtual address | |
4751 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4752 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4753 | ||
4754 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
4755 | bdnz hpfGVlp32 ; Iterate | |
4756 | ||
4757 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
4758 | andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag | |
4759 | xor r4,r4,r21 ; Compare space ID | |
4760 | or r0,r3,r4 ; r0 <- !(!free && !dormant && space match) | |
4761 | xor r5,r5,r10 ; Compare virtual address | |
4762 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4763 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4764 | ||
4765 | b hpfGVmiss | |
4766 | ||
4767 | .align 5 | |
4768 | hpfGV64: | |
4769 | ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr | |
4770 | lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags | |
4771 | lwz r21,pmapSpace(r28) ; r21 <- guest space ID number | |
4772 | ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr | |
4773 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
4774 | rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr | |
4775 | rldimi r10,r29,32,0 ; cleaning up low-order 12 bits | |
4776 | lwz r6,vxsGpf(r11) ; Get guest fault count | |
4777 | ||
4778 | srwi r3,r10,12 ; Form shadow hash: | |
4779 | xor r3,r3,r21 ; spaceID ^ (vaddr >> 12) | |
4780 | rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
4781 | ; Form index offset from hash page number | |
4782 | add r31,r31,r4 ; r31 <- hash page index entry | |
4783 | ld r31,0(r31) ; r31 <- hash page paddr | |
4784 | insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
4785 | ; r31 <- hash group paddr | |
4786 | ||
4787 | la r3,pmapSXlk(r28) ; Point to the host pmap's search lock | |
4788 | bl sxlkShared ; Go get a shared lock on the mapping lists | |
4789 | mr. r3,r3 ; Did we get the lock? | |
4790 | bne-- hpfBadLock ; Nope... | |
4791 | ||
4792 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags |
4793 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
4794 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
4795 | addi r6,r6,1 ; Increment guest fault count | |
4796 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
4797 | mtctr r0 ; in this group | |
4798 | stw r6,vxsGpf(r11) ; Update guest fault count | |
4799 | b hpfGVlp64 | |
4800 | ||
4801 | .align 5 | |
4802 | hpfGVlp64: | |
4803 | mr r6,r3 ; r6 <- current mapping slot's flags | |
4804 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
4805 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
4806 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
4807 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
4808 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
4809 | andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag | |
4810 | xor r7,r7,r21 ; Compare space ID | |
4811 | or r0,r6,r7 ; r0 <- !(!free && !dormant && space match) | |
4812 | xor r8,r8,r10 ; Compare virtual address | |
4813 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4814 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4815 | ||
4816 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
4817 | bdnz hpfGVlp64 ; Iterate | |
4818 | ||
4819 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
4820 | andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag | |
4821 | xor r4,r4,r21 ; Compare space ID | |
4822 | or r0,r3,r4 ; r0 <- !(!free && !dormant && space match) | |
4823 | xor r5,r5,r10 ; Compare virtual address | |
4824 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
4825 | beq hpfGVfound ; Join common path on hit (r31 points to mapping) | |
4826 | ||
4827 | hpfGVmiss: | |
4828 | lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count | |
4829 | addi r6,r6,1 ; Increment miss count | |
4830 | stw r6,vxsGpfMiss(r11) ; Update guest fault miss count | |
4831 | b hpfNotFound | |
4832 | |
4833 | /* | |
4834 | * hw_set_user_space(pmap) | |
4835 | * hw_set_user_space_dis(pmap) | |
4836 | * | |
4837 | * Indicate whether memory space needs to be switched. | |
4838 | * We really need to turn off interrupts here, because we need to be non-preemptable | |
4839 | * |
4840 | * hw_set_user_space_dis is used when interruptions are already disabled. Mind the | |
4841 | * register usage here. The VMM switch code in vmachmon.s that calls this | |
4842 | * knows what registers are in use. Check that code if these change. | |
4843 | */ | |
4844 | ||
4845 | ||
4846 | |
4847 | .align 5 | |
4848 | .globl EXT(hw_set_user_space) | |
4849 | ||
4850 | LEXT(hw_set_user_space) | |
4851 | ||
4852 | lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable | |
4853 | mfmsr r10 ; Get the current MSR | |
4854 | ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP | |
4855 | ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE | |
4856 | andc r10,r10,r8 ; Turn off VEC, FP for good | |
4857 | andc r9,r10,r9 ; Turn off EE also | |
4858 | mtmsr r9 ; Disable them | |
4859 | isync ; Make sure FP and vec are off | |
4860 | mfsprg r6,1 ; Get the current activation |
4861 | lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block | |
4862 | lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address |
4863 | mfsprg r4,2 ; Get the feature flags | |
4864 | lwz r7,pmapvr(r3) ; Get the v to r translation | |
4865 | lwz r8,pmapvr+4(r3) ; Get the v to r translation | |
4866 | mtcrf 0x80,r4 ; Get the Altivec flag | |
4867 | xor r4,r3,r8 ; Get bottom of the real address of bmap anchor | |
4868 | cmplw cr1,r3,r2 ; Same address space as before? | |
4869 | stw r7,ppUserPmap(r6) ; Show our real pmap address | |
4870 | crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine | |
4871 | stw r4,ppUserPmap+4(r6) ; Show our real pmap address | |
4872 | stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address | |
4873 | mtmsr r10 ; Restore interruptions | |
4874 | beqlr-- cr1 ; Leave if the same address space or not Altivec | |
4875 | ||
4876 | dssall ; Need to kill all data streams if adrsp changed | |
4877 | sync | |
4878 | blr ; Return... | |
4879 | ||
4880 | .align 5 | |
4881 | .globl EXT(hw_set_user_space_dis) | |
4882 | ||
4883 | LEXT(hw_set_user_space_dis) | |
4884 | ||
4885 | lwz r7,pmapvr(r3) ; Get the v to r translation | |
4886 | mfsprg r4,2 ; Get the feature flags | |
4887 | lwz r8,pmapvr+4(r3) ; Get the v to r translation | |
4888 | mfsprg r6,1 ; Get the current activation |
4889 | lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block | |
4890 | lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address |
4891 | mtcrf 0x80,r4 ; Get the Altivec flag | |
4892 | xor r4,r3,r8 ; Get bottom of the real address of bmap anchor | |
4893 | cmplw cr1,r3,r2 ; Same address space as before? | |
4894 | stw r7,ppUserPmap(r6) ; Show our real pmap address | |
4895 | crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine | |
4896 | stw r4,ppUserPmap+4(r6) ; Show our real pmap address | |
4897 | stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address | |
4898 | beqlr-- cr1 ; Leave if the same | |
4899 | ||
4900 | dssall ; Need to kill all data streams if adrsp changed | |
4901 | sync | |
4902 | blr ; Return... | |
4903 | ||
4904 | /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry | |
4905 | * | |
4906 | * Lock must already be held on mapping block list | |
4907 | * returns 0 if all slots filled. | |
4908 | * returns n if a slot is found and it is not the last | |
4909 | * returns -n if a slot is found and it is the last | |
4910 | * when n and -n are returned, the corresponding bit is cleared | |
4911 | * the mapping is zeroed out before return | |
4912 | * | |
4913 | */ | |
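;
;			In rough C, the allocator below behaves like this sketch. The layout
;			is simplified and assumed (the real mappingblok lives in the pmap
;			headers; slot 0 holds the block header, so bit 0 is never free):
;
;			    #include <stdint.h>
;			    #include <string.h>
;
;			    struct mb_model { uint32_t mbfree[2]; uint8_t slot[64][64]; };
;
;			    int mapalc1_model(struct mb_model *mb) {
;			        uint64_t free = ((uint64_t)mb->mbfree[0] << 32) | mb->mbfree[1];
;			        if (free == 0) return 0;                  /* all slots filled */
;			        int n = __builtin_clzll(free);            /* cntlzw, doubled up */
;			        free &= ~(0x8000000000000000ULL >> n);    /* claim the slot */
;			        mb->mbfree[0] = (uint32_t)(free >> 32);
;			        mb->mbfree[1] = (uint32_t)free;
;			        memset(mb->slot[n], 0, 64);               /* the two dcbz's */
;			        return free ? n : -n;                     /* -n flags the last one */
;			    }
;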
4914 | ||
4915 | .align 5 | |
4916 | .globl EXT(mapalc1) | |
4917 | ||
4918 | LEXT(mapalc1) | |
4919 | lwz r4,mbfree(r3) ; Get the 1st mask | |
4920 | lis r0,0x8000 ; Get the mask to clear the first free bit | |
4921 | lwz r5,mbfree+4(r3) ; Get the 2nd mask | |
4922 | mr r12,r3 ; Save the block ptr | |
4923 | cntlzw r3,r4 ; Get first 1-bit in 1st word | |
4924 | srw. r9,r0,r3 ; Get bit corresponding to first free one | |
4925 | cntlzw r10,r5 ; Get first free field in second word | |
4926 | andc r4,r4,r9 ; Turn 1-bit off in 1st word | |
4927 | bne mapalc1f ; Found one in 1st word | |
4928 | ||
4929 | srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word | |
4930 | li r3,0 ; assume failure return | |
4931 | andc r5,r5,r9 ; Turn it off | |
4932 | beqlr-- ; There are no 1 bits left... | |
4933 | addi r3,r10,32 ; set the correct number | |
4934 | ||
4935 | mapalc1f: | |
4936 | or. r0,r4,r5 ; any more bits set? | |
4937 | stw r4,mbfree(r12) ; update bitmasks | |
4938 | stw r5,mbfree+4(r12) | |
4939 | ||
4940 | slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block | |
4941 | addi r7,r6,32 | |
4942 | dcbz r6,r12 ; clear the 64-byte mapping | |
4943 | dcbz r7,r12 | |
4944 | ||
4945 | bnelr++ ; return if another bit remains set | |
4946 | ||
4947 | neg r3,r3 ; indicate we just returned the last bit | |
4948 | blr | |
4949 | ||
4950 | ||
4951 | /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry | |
4952 | * | |
4953 | * Lock must already be held on mapping block list | |
4954 | * returns 0 if all slots filled. | |
4955 | * returns n if a slot is found and it is not the last | |
4956 | * returns -n if a slot is found and it is the last | |
4957 | * when n and -n are returned, the corresponding bits are cleared | |
4958 | * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)). | |
4959 | * the mapping is zeroed out before return | |
4960 | */ | |
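;
;			The run-finding trick in rough C: a bit is set in (n & (n << 1))
;			exactly where a 1-bit is immediately followed by another, i.e. at the
;			start of every run of two or more. It cannot see a pair straddling
;			the two words; mapalc2c below handles that case separately.
;
;			    #include <stdint.h>
;
;			    int first_pair(uint32_t n) {                  /* bit 0 is the MSB */
;			        uint32_t runs = n & (n << 1);
;			        return runs ? __builtin_clz(runs) : 32;   /* cntlzw(n & (n<<1)) */
;			    }
;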
4961 | ||
4962 | .align 5 | |
4963 | .globl EXT(mapalc2) | |
4964 | LEXT(mapalc2) | |
4965 | lwz r4,mbfree(r3) ; Get the first mask | |
4966 | lis r0,0x8000 ; Get the mask to clear the first free bit | |
4967 | lwz r5,mbfree+4(r3) ; Get the second mask | |
4968 | mr r12,r3 ; Save the block ptr | |
4969 | slwi r6,r4,1 ; shift first word over | |
4970 | and r6,r4,r6 ; lite start of double bit runs in 1st word | |
4971 | slwi r7,r5,1 ; shift 2nd word over | |
4972 | cntlzw r3,r6 ; Get first free 2-bit run in 1st word | |
4973 | and r7,r5,r7 ; lite start of double bit runs in 2nd word | |
4974 | srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word | |
4975 | cntlzw r10,r7 ; Get first free field in second word | |
4976 | srwi r11,r9,1 ; shift over for 2nd bit in 1st word | |
4977 | andc r4,r4,r9 ; Turn off 1st bit in 1st word | |
4978 | andc r4,r4,r11 ; turn off 2nd bit in 1st word | |
4979 | bne mapalc2a ; Found two consecutive free bits in 1st word | |
4980 | ||
4981 | srw. r9,r0,r10 ; Get bit corresponding to first free one in second word | |
4982 | li r3,0 ; assume failure | |
4983 | srwi r11,r9,1 ; get mask for 2nd bit | |
4984 | andc r5,r5,r9 ; Turn off 1st bit in 2nd word | |
4985 | andc r5,r5,r11 ; turn off 2nd bit in 2nd word | |
4986 | beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either | |
4987 | addi r3,r10,32 ; set the correct number | |
4988 | ||
4989 | mapalc2a: | |
4990 | or. r0,r4,r5 ; any more bits set? | |
4991 | stw r4,mbfree(r12) ; update bitmasks | |
4992 | stw r5,mbfree+4(r12) | |
4993 | slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block | |
4994 | addi r7,r6,32 | |
4995 | addi r8,r6,64 | |
4996 | addi r9,r6,96 | |
4997 | dcbz r6,r12 ; zero out the 128-byte mapping | |
4998 | dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines | |
4999 | dcbz r8,r12 ; because the mapping may not be 128-byte aligned | |
5000 | dcbz r9,r12 | |
5001 | ||
5002 | bnelr++ ; return if another bit remains set | |
5003 | ||
5004 | neg r3,r3 ; indicate we just returned the last bit | |
5005 | blr | |
5006 | ||
5007 | mapalc2c: | |
5008 | rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31 | |
5009 | and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free? | |
5010 | beqlr ; no, we failed | |
5011 | rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word | |
5012 | rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word | |
5013 | li r3,31 ; get index of this field | |
5014 | b mapalc2a | |
5015 | ||
5016 | ||
5017 | ; | |
5018 | ; This routine initializes the hash table and PCA. | |
5019 | ; It is done here because we may need to be 64-bit to do it. | |
5020 | ; | |
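;
;			In rough C, the 32-bit path below amounts to this sketch (0xFF010000
;			is the "all slots free, steal from the end" PCA image; each 64-byte
;			PTEG gets one 4-byte PCA entry just below the hash table base):
;
;			    #include <stdint.h>
;			    #include <string.h>
;
;			    void hash_init_model(uint8_t *htab_base, uint32_t htab_size) {
;			        memset(htab_base, 0, htab_size);          /* the dcbz loop */
;			        uint32_t pca_bytes = htab_size >> 4;      /* number of slots * 4 */
;			        for (uint32_t off = 4; off <= pca_bytes; off += 4)
;			            *(uint32_t *)(htab_base - off) = 0xFF010000;
;			    }
;
;			The 64-bit path is the same shape with 128-byte cache lines and
;			pca_bytes computed as htab_size >> 5.
;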
5021 | ||
5022 | .align 5 | |
5023 | .globl EXT(hw_hash_init) | |
5024 | ||
5025 | LEXT(hw_hash_init) | |
5026 | ||
5027 | mfsprg r10,2 ; Get feature flags | |
5028 | lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address | |
5029 | mtcrf 0x02,r10 ; move pf64Bit to cr6 | |
5030 | lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address | |
5031 | lis r4,0xFF01 ; Set all slots free and start steal at end | |
5032 | ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address | |
5033 | ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address | |
5034 | ||
5035 | lwz r12,0(r12) ; Get hash table size | |
5036 | li r3,0 ; Get start | |
5037 | bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint) | |
5038 | ||
5039 | lwz r11,4(r11) ; Get hash table base | |
5040 | ||
5041 | hhiNext32: cmplw r3,r12 ; Have we reached the end? | |
5042 | bge- hhiCPCA32 ; Yes... | |
5043 | dcbz r3,r11 ; Clear the line | |
5044 | addi r3,r3,32 ; Next one... | |
5045 | b hhiNext32 ; Go on... | |
5046 | ||
5047 | hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4 | |
5048 | li r3,-4 ; Displacement to first PCA entry | |
5049 | neg r12,r12 ; Get negative end of PCA | |
5050 | ||
5051 | hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry | |
5052 | subi r3,r3,4 ; Next slot | |
5053 | cmpw r3,r12 ; Have we finished? | |
5054 | bge+ hhiNPCA32 ; Not yet... | |
5055 | blr ; Leave... | |
5056 | ||
5057 | hhiSF: mfmsr r9 ; Save the MSR | |
5058 | li r8,1 ; Get a 1 | |
5059 | mr r0,r9 ; Get a copy of the MSR | |
5060 | ld r11,0(r11) ; Get hash table base | |
5061 | rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0) | |
5062 | mtmsrd r0 ; Turn on SF | |
5063 | isync | |
5064 | ||
5065 | ||
5066 | hhiNext64: cmpld r3,r12 ; Have we reached the end? | |
5067 | bge-- hhiCPCA64 ; Yes... | |
5068 | dcbz128 r3,r11 ; Clear the line | |
5069 | addi r3,r3,128 ; Next one... | |
5070 | b hhiNext64 ; Go on... | |
5071 | ||
5072 | hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4 | |
5073 | li r3,-4 ; Displacement to first PCA entry | |
5074 | neg r12,r12 ; Get negative end of PCA | |
5075 | ||
5076 | hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry | |
5077 | subi r3,r3,4 ; Next slot | |
5078 | cmpd r3,r12 ; Have we finished? | |
5079 | bge++ hhiNPCA64 ; Not yet... | |
5080 | ||
5081 | mtmsrd r9 ; Restore the MSR (turns SF back off if it was off) | |
5082 | isync | |
5083 | blr ; Leave... | |
5084 | ||
5085 | ||
5086 | ; | |
5087 | ; This routine sets up the hardware to start translation. | |
5088 | ; Note that we do NOT start translation. | |
5089 | ; | |
5090 | ||
5091 | .align 5 | |
5092 | .globl EXT(hw_setup_trans) | |
5093 | ||
5094 | LEXT(hw_setup_trans) | |
5095 | ||
5096 | mfsprg r11,0 ; Get the per_proc block | |
5097 | mfsprg r12,2 ; Get feature flags | |
5098 | li r0,0 ; Get a 0 | |
5099 | li r2,1 ; And a 1 | |
5100 | mtcrf 0x02,r12 ; Move pf64Bit to cr6 | |
5101 | stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid | |
5102 | stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux | |
5103 | sth r2,ppInvSeg(r11) ; Force a reload of the SRs | |
5104 | sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel | |
5105 | ||
5106 | bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint) | |
5107 | ||
5108 | li r9,0 ; Clear out a register | |
5109 | sync | |
5110 | isync | |
5111 | mtdbatu 0,r9 ; Invalidate maps | |
5112 | mtdbatl 0,r9 ; Invalidate maps | |
5113 | mtdbatu 1,r9 ; Invalidate maps | |
5114 | mtdbatl 1,r9 ; Invalidate maps | |
5115 | mtdbatu 2,r9 ; Invalidate maps | |
5116 | mtdbatl 2,r9 ; Invalidate maps | |
5117 | mtdbatu 3,r9 ; Invalidate maps | |
5118 | mtdbatl 3,r9 ; Invalidate maps | |
5119 | ||
5120 | mtibatu 0,r9 ; Invalidate maps | |
5121 | mtibatl 0,r9 ; Invalidate maps | |
5122 | mtibatu 1,r9 ; Invalidate maps | |
5123 | mtibatl 1,r9 ; Invalidate maps | |
5124 | mtibatu 2,r9 ; Invalidate maps | |
5125 | mtibatl 2,r9 ; Invalidate maps | |
5126 | mtibatu 3,r9 ; Invalidate maps | |
5127 | mtibatl 3,r9 ; Invalidate maps | |
5128 | ||
5129 | lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address | |
5130 | lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address | |
5131 | ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address | |
5132 | ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address | |
5133 | lwz r11,4(r11) ; Get hash table base | |
5134 | lwz r12,0(r12) ; Get hash table size | |
5135 | subi r12,r12,1 ; Back off by 1 | |
5136 | rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image | |
5137 | ||
5138 | mtsdr1 r11 ; Ok, we now have the hash table set up | |
5139 | sync | |
5140 | ||
5141 | li r12,invalSpace ; Get the invalid segment value | |
5142 | li r10,0 ; Start low | |
5143 | ||
5144 | hstsetsr: mtsrin r12,r10 ; Set the SR | |
5145 | addis r10,r10,0x1000 ; Bump the segment | |
5146 | mr. r10,r10 ; Are we finished? | |
5147 | bne+ hstsetsr ; Nope... | |
5148 | sync | |
5149 | blr ; Return... | |
5150 | ||
5151 | ; | |
5152 | ; 64-bit version | |
5153 | ; | |
5154 | ||
5155 | hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address | |
5156 | lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address | |
5157 | ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address | |
5158 | ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address | |
5159 | ld r11,0(r11) ; Get hash table base | |
5160 | lwz r12,0(r12) ; Get hash table size | |
5161 | cntlzw r10,r12 ; Get the number of bits | |
5162 | subfic r10,r10,13 ; Get the extra bits we need | |
5163 | or r11,r11,r10 ; Add the size field to SDR1 | |
5164 | ||
5165 | mtsdr1 r11 ; Ok, we now have the hash table set up | |
5166 | sync | |
5167 | ||
5168 | li r0,0 ; Set an SLB slot index of 0 | |
5169 | slbia ; Trash all SLB entries (except for entry 0 that is) | |
5170 | slbmfee r7,r0 ; Get the entry that is in SLB index 0 | |
5171 | rldicr r7,r7,0,35 ; Clear the valid bit and the rest | |
5172 | slbie r7 ; Invalidate it | |
5173 | ||
5174 | blr ; Return... | |
5175 | ||
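;
;			The SDR1 images built above, in rough C (size is a power of two;
;			field placement per the 32-bit and 64-bit architecture definitions):
;
;			    #include <stdint.h>
;
;			    uint32_t sdr1_image32(uint32_t base, uint32_t size) {
;			        return base | ((size - 1) >> 16);          /* HTABORG | HTABMASK */
;			    }
;
;			    uint64_t sdr1_image64(uint64_t base, uint32_t size) {
;			        uint32_t log2sz = 31 - __builtin_clz(size);
;			        return base | (uint64_t)(log2sz - 18);     /* HTABORG | HTABSIZE */
;			    }
;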
5176 | ||
5177 | ; | |
5178 | ; This routine turns on translation for the first time on a processor | |
5179 | ; | |
5180 | ||
5181 | .align 5 | |
5182 | .globl EXT(hw_start_trans) | |
5183 | ||
5184 | LEXT(hw_start_trans) | |
5185 | ||
5186 | ||
5187 | mfmsr r10 ; Get the msr | |
5188 | ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation | |
5189 | ||
5190 | mtmsr r10 ; Everything falls apart here | |
5191 | isync | |
5192 | ||
5193 | blr ; Back to it. | |
5194 | ||
5195 | ||
5196 | ||
5197 | ; | |
5198 | ; This routine validates a segment register. | |
5199 | ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va) | |
5200 | ; | |
5201 | ; r3 = virtual pmap | |
5202 | ; r4 = segment[0:31] | |
5203 | ; r5 = segment[32:63] | |
5204 | ; r6 = va[0:31] | |
5205 | ; r7 = va[32:63] | |
5206 | ; | |
5207 | ; Note that we transform the addr64_t (long long) parameters into single 64-bit values. | |
5208 | ; Note that there is no reason to apply the key modifier here because this is only | |
5209 | ; used for kernel accesses. | |
5210 | ; | |
5211 | ||
5212 | .align 5 | |
5213 | .globl EXT(hw_map_seg) | |
5214 | ||
5215 | LEXT(hw_map_seg) | |
5216 | ||
5217 | lwz r0,pmapSpace(r3) ; Get the space, we will need it soon | |
5218 | lwz r9,pmapFlags(r3) ; Get the flags for the keys now | |
5219 | mfsprg r10,2 ; Get feature flags | |
5220 | |
5221 | ; | |
5222 | ; Note: the following code would probably be easier to follow if I split it, | |
5223 | ; but I just wanted to see if I could write this to work on both 32- and 64-bit | |
5224 | ; machines combined. | |
5225 | ; | |
5226 | ||
5227 | ; | |
5228 | ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines) | |
5229 | ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines) | |
5230 | ||
5231 | rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit | |
5232 | rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID | |
5233 | mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6 | |
5234 | srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest | |
5235 | rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35] | |
5236 | rlwimi r0,r0,14,4,17 ; Dup address space ID above itself | |
5237 | rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines) | |
5238 | rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half | |
5239 | rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32 | |
5240 | rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines) | |
5241 | ||
5242 | rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space | |
5243 | ; concatenated together. There is garbage | |
5244 | ; at the top for 64-bit but we will clean | |
5245 | ; that out later. | |
5246 | rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit | |
5247 | ||
5248 | ||
5249 | ; | |
5250 | ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or | |
5251 | ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines | |
5252 | ; | |
5253 | ||
5254 | ; | |
5255 | ; What we have now is: | |
5256 | ; | |
5257 | ; 0 0 1 2 3 4 4 5 6 | |
5258 | ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines | |
5259 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5260 | ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value | |
5261 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5262 | ; 0 0 1 2 3 - for 32-bit machines | |
5263 | ; 0 8 6 4 1 | |
5264 | ; | |
5265 | ; 0 0 1 2 3 4 4 5 6 | |
5266 | ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines | |
5267 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5268 | ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA | |
5269 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5270 | ; 0 0 1 2 3 - for 32-bit machines | |
5271 | ; 0 8 6 4 1 | |
5272 | ; | |
5273 | ; 0 0 1 2 3 4 4 5 6 | |
5274 | ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines | |
5275 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5276 | ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment | |
5277 | ; +--------+--------+--------+--------+--------+--------+--------+--------+ | |
5278 | ; 0 0 1 2 3 - for 32-bit machines | |
5279 | ; 0 8 6 4 1 | |
5280 | ||
5281 | ||
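;
;			In rough C, the VSID computed next looks like this sketch (a 14-bit
;			space ID is assumed here; the replication counts are illustrative):
;
;			    #include <stdint.h>
;
;			    uint64_t vsid_hash(uint64_t va, uint64_t sid) {
;			        uint64_t esid = va >> 28;                  /* va[0:35] */
;			        uint64_t rep  = sid | (sid << 14) | (sid << 28) | (sid << 42);
;			        return (esid << 12) ^ rep;                 /* high bits cleaned later */
;			    }
;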
5282 | xor r8,r8,r2 ; Calculate VSID | |
5283 | ||
5284 | bf-- pf64Bitb,hms32bit ; Skip out if 32-bit... | |
5285 | mfsprg r12,0 ; Get the per_proc | |
5286 | li r0,1 ; Prepare to set bit 0 (also to clear EE) |
5287 | mfmsr r6 ; Get current MSR | |
5288 | li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits | |
5289 | mtmsrd r0,1 ; Set only the EE bit to 0 | |
5290 | rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on | |
5291 | mfmsr r11 ; Get the MSR right now, after disabling EE | |
5292 | andc r2,r11,r2 ; Turn off translation now | |
5293 | rldimi r2,r0,63,0 ; Get the 64-bit mode (SF) bit turned on | |
5294 | or r11,r11,r6 ; Turn on the EE bit if it was on | |
5295 | mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on | |
5296 | isync ; Hang out a bit | |
5297 | ||
5298 | ld r6,validSegs(r12) ; Get the valid SLB entry flags | |
5299 | sldi r9,r9,9 ; Position the key and noex bit | |
5300 | ||
5301 | rldimi r5,r8,12,0 ; Form the VSID/key | |
5302 | ||
5303 | not r3,r6 ; Make valids be 0s | |
5304 | ||
5305 | cntlzd r7,r3 ; Find a free SLB | |
5306 | cmplwi r7,63 ; Did we find a free SLB entry? | |
5307 | ||
5308 | slbie r4 ; Since this ESID may still be in an SLBE, kill it | |
5309 | ||
5310 | oris r4,r4,0x0800 ; Turn on the valid bit in ESID | |
5311 | addi r7,r7,1 ; Make sure we skip slb 0 | |
5312 | blt++ hmsFreeSeg ; Yes, go load it... | |
5313 | ||
5314 | ; | |
5315 | ; No free SLB entries, select one that is in use and invalidate it | |
5316 | ; | |
5317 | lwz r2,ppSegSteal(r12) ; Get the next slot to steal | |
5318 | addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only | |
5319 | addi r2,r2,1 ; Set next slot to steal | |
5320 | slbmfee r3,r7 ; Get the entry that is in the selected spot | |
5321 | subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap | |
5322 | rldicr r3,r3,0,35 ; Clear the valid bit and the rest | |
5323 | srawi r8,r8,31 ; Get -1 if steal index still in range | |
5324 | slbie r3 ; Invalidate the in-use SLB entry | |
5325 | and r2,r2,r8 ; Reset steal index when it should wrap | |
5326 | isync ; | |
5327 | ||
5328 | stw r2,ppSegSteal(r12) ; Set the next slot to steal | |
5329 | ; | |
5330 | ; We are now ready to stick the SLB entry in the SLB and mark it in use | |
5331 | ; | |
5332 | ||
5333 | hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0 | |
5334 | rldimi r4,r7,0,58 ; Copy in the SLB entry selector | |
5335 | srd r0,r0,r2 ; Set bit mask for allocation | |
5336 | rldicl r5,r5,0,15 ; Clean out the unsupported bits | |
5337 | or r6,r6,r0 ; Turn on the allocation flag | |
5338 | ||
5339 | slbmte r5,r4 ; Make that SLB entry | |
5340 | ||
5341 | std r6,validSegs(r12) ; Mark as valid | |
5342 | mtmsrd r11 ; Restore the MSR | |
5343 | isync | |
5344 | blr ; Back to it... | |
5345 | ||
5346 | .align 5 | |
5347 | ||
5348 | hms32bit: |
5349 | mfsprg r12,1 ; Get the current activation | |
5350 | lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block | |
5351 | rlwinm r8,r8,0,8,31 ; Clean up the VSID | |
5352 | rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting |
5353 | lis r0,0x8000 ; Set bit 0 | |
5354 | rlwimi r8,r9,28,1,3 ; Insert the keys and N bit | |
5355 | srw r0,r0,r2 ; Get bit corresponding to SR | |
5356 | addi r7,r12,validSegs ; Point to the valid segment flags directly | |
5357 | ||
5358 | mtsrin r8,r4 ; Set the actual SR | |
5359 | isync ; Need to make sure this is done | |
5360 | ||
5361 | hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags | |
5362 | or r6,r6,r0 ; Show that SR is valid | |
5363 | stwcx. r6,0,r7 ; Set the valid SR flags | |
5364 | bne-- hmsrupt ; Had an interrupt, need to get flags again... | |
5365 | ||
5366 | blr ; Back to it... | |
5367 | ||
5368 | ||
5369 | ; | |
5370 | ; This routine invalidates a segment register. | |
5371 | ; | |
5372 | ||
5373 | .align 5 | |
5374 | .globl EXT(hw_blow_seg) | |
5375 | ||
5376 | LEXT(hw_blow_seg) | |
5377 | ||
5378 | mfsprg r10,2 ; Get feature flags | |
5379 | mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6 |
5380 | ||
5381 | rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean |
5382 | ||
5383 | bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit... | |
5384 | ||
5385 | li r0,1 ; Prepare to set bit 0 (also to clear EE) | |
5386 | mfmsr r6 ; Get current MSR | |
5387 | li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits | |
5388 | mtmsrd r0,1 ; Set only the EE bit to 0 | |
5389 | rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on | |
5390 | mfmsr r11 ; Get the MSR right now, after disabling EE | |
5391 | andc r2,r11,r2 ; Turn off translation now | |
5392 | rldimi r2,r0,63,0 ; Get the 64-bit mode (SF) bit turned on | |
5393 | or r11,r11,r6 ; Turn on the EE bit if it was on | |
5394 | mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on | |
5395 | isync ; Hang out a bit | |
5396 | ||
5397 | rldimi r9,r3,32,0 ; Insert the top part of the ESID | |
5398 | ||
5399 | slbie r9 ; Invalidate the associated SLB entry | |
5400 | ||
5401 | mtmsrd r11 ; Restore the MSR | |
5402 | isync | |
5403 | blr ; Back to it. | |
5404 | ||
5405 | .align 5 | |
5406 | ||
5407 | hbs32bit: |
5408 | mfsprg r12,1 ; Get the current activation | |
5409 | lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block | |
5410 | addi r7,r12,validSegs ; Point to the valid segment flags directly | |
5411 | lwarx r4,0,r7 ; Get and reserve the valid segment flags | |
5412 | rlwinm r6,r9,4,28,31 ; Convert segment to number |
5413 | lis r2,0x8000 ; Set up a mask | |
5414 | srw r2,r2,r6 ; Make a mask | |
5415 | and. r0,r4,r2 ; See if this is even valid | |
5416 | li r5,invalSpace ; Set the invalid address space VSID | |
5417 | beqlr ; Leave if already invalid... | |
5418 | ||
5419 | mtsrin r5,r9 ; Slam the segment register | |
5420 | isync ; Need to make sure this is done | |
5421 | ||
5422 | hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment | |
5423 | stwcx. r4,0,r7 ; Set the valid SR flags | |
5424 | beqlr++ ; Stored ok, no interrupt, time to leave... | |
5425 | ||
5426 | lwarx r4,0,r7 ; Get and reserve the valid segment flags again | |
5427 | b hbsrupt ; Try again... | |
5428 | ||
5429 | ; | |
5430 | ; This routine invalidates the entire pmap segment cache | |
5431 | ; | |
5432 | ; Translation is on, interrupts may or may not be enabled. | |
5433 | ; | |
5434 | ||
5435 | .align 5 | |
5436 | .globl EXT(invalidateSegs) | |
5437 | ||
5438 | LEXT(invalidateSegs) | |
5439 | ||
5440 | la r10,pmapCCtl(r3) ; Point to the segment cache control | |
5441 | eqv r2,r2,r2 ; Get all foxes | |
5442 | ||
5443 | isInv: lwarx r4,0,r10 ; Get the segment cache control value | |
5444 | rlwimi r4,r2,0,0,15 ; Slam in all invalid bits | |
5445 | rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? | |
5446 | bne-- isInv0 ; Yes, go wait for it to clear... | |
5447 | ||
5448 | stwcx. r4,0,r10 ; Try to invalidate it | |
5449 | bne-- isInv ; Someone else just stuffed it... | |
5450 | blr ; Leave... | |
5451 | ||
5452 | ||
5453 | isInv0: li r4,lgKillResv ; Get reservation kill zone | |
5454 | stwcx. r4,0,r4 ; Kill reservation | |
5455 | ||
5456 | isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control | |
5457 | rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked? | |
5458 | bne-- isInv1 ; Yes, still locked, check again... | |
5459 | b isInv ; Unlocked now, go try for it again... | |
5460 | ||
5461 | ; | |
5462 | ; This routine switches segment registers between kernel and user. | |
5463 | ; We have some assumptions and rules: | |
5464 | ; We are in the exception vectors | |
5465 | ; pf64Bitb is set up | |
5466 | ; R3 contains the MSR we are going to | |
5467 | ; We cannot use R4, R13, R20, R21, R29 | |
5468 | ; R13 is the savearea | |
5469 | ; R29 has the per_proc | |
5470 | ; | |
5471 | ; We return R3 as 0 if we did not switch between kernel and user | |
5472 | ; We also maintain and apply the user state key modifier used by VMM support; | |
5473 | ; If we go to the kernel it is set to 0, otherwise it follows the bit | |
5474 | ; in spcFlags. | |
5475 | ; | |
5476 | ||
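;
;			The pmap selection below is branch-free. In rough C (sketch only;
;			pick_pmap and to_user are hypothetical names):
;
;			    #include <stdint.h>
;
;			    uintptr_t pick_pmap(int to_user, uintptr_t kernel_pmap,
;			                        uintptr_t user_pmap) {
;			        uintptr_t mask = 0 - (uintptr_t)(to_user != 0); /* 0 or all ones */
;			        return (kernel_pmap & ~mask) | (user_pmap & mask);
;			    }
;
;			The user-state key modifier gets the same treatment: ANDing with the
;			mask forces it to 0 whenever we are headed into the kernel.
;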
5477 | .align 5 | |
5478 | .globl EXT(switchSegs) | |
5479 | ||
5480 | LEXT(switchSegs) |
5481 | ||
5482 | lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator) | |
5483 | lwz r9,spcFlags(r29) ; Pick up the special user state flags | |
5484 | rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit | |
5485 | rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit | |
5486 | lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel | |
5487 | or r2,r2,r3 ; This will be 1 if we will be using user segments | |
5488 | li r3,0 ; Get a selection mask | |
5489 | cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg | |
5490 | ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address) | |
5491 | sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user | |
5492 | la r19,ppUserPmap(r29) ; Point to the current user pmap | |
5493 | ||
5494 | ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice | |
5495 | rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key | |
5496 | ||
5497 | andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise | |
5498 | and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise | |
5499 | and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise | |
5500 | or r8,r8,r19 ; Get the pointer to the pmap we are using | |
5501 | ||
5502 | beqlr ; We are staying in the same mode, do not touch segs... | |
5503 | ||
5504 | lwz r28,0(r8) ; Get top half of pmap address | |
5505 | lwz r10,4(r8) ; Get bottom half | |
5506 | ||
5507 | stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg | |
5508 | rlwinm r28,r28,0,1,0 ; Copy top to top | |
5509 | stw r30,ppMapFlags(r29) ; Set the key modifier | |
5510 | rlwimi r28,r10,0,0,31 ; Insert bottom | |
5511 | ||
5512 | la r10,pmapCCtl(r28) ; Point to the segment cache control | |
5513 | la r9,pmapSegCache(r28) ; Point to the segment cache | |
5514 | ||
5515 | ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control | |
5516 | rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock? | |
5517 | ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit | |
5518 | bne-- ssgLock0 ; Yup, this is in use... | |
5519 | ||
5520 | stwcx. r16,0,r10 ; Try to set the lock | |
5521 | bne-- ssgLock ; Did we get contention? | |
5522 | ||
5523 | not r11,r15 ; Invert the invalids to valids | |
5524 | li r17,0 ; Set a mask for the SRs we are loading | |
5525 | isync ; Make sure we are all caught up | |
5526 | ||
5527 | bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it... | |
5528 | ||
5529 | li r0,0 ; Clear | |
5530 | slbia ; Trash all SLB entries (except for entry 0 that is) | |
5531 | li r17,1 ; Get SLB index to load (skip slb 0) | |
5532 | oris r0,r0,0x8000 ; Get set for a mask | |
5533 | b ssg64Enter ; Start on a cache line... | |
5534 | |
5535 | .align 5 | |
5536 | ||
5537 | ssgLock0: li r15,lgKillResv ; Killing field |
5538 | stwcx. r15,0,r15 ; Kill reservation | |
5539 | ||
5540 | ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls |
5541 | rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock? | |
5542 | beq++ ssgLock ; Nope, free now, go try to grab it... | |
5543 | b ssgLock1 ; Yup, still locked, check again... | |
5544 | ; | |
5545 | ; This is the 32-bit address space switch code. | |
5546 | ; We take a reservation on the segment cache and walk through. | |
5547 | ; For each entry, we load the specified entries and remember which | |
5548 | ; we did with a mask. Then, we figure out which segments should be | |
5549 | ; invalid and then see which actually are. Then we load those with the | |
5550 | ; defined invalid VSID. | |
5551 | ; Afterwards, we unlock the segment cache. | |
5552 | ; | |
5553 | ||
5554 | .align 5 |
5555 | ||
5556 | ssg32Enter: cntlzw r12,r11 ; Find the next slot in use | |
5557 | cmplwi r12,pmapSegCacheUse ; See if we are done | |
5558 | slwi r14,r12,4 ; Index to the cache slot | |
5559 | lis r0,0x8000 ; Get set for a mask | |
5560 | add r14,r14,r9 ; Point to the entry | |
5561 | ||
5562 | bge- ssg32Done ; All done... | |
5563 | ||
5564 | lwz r5,sgcESID+4(r14) ; Get the ESID part | |
5565 | srw r2,r0,r12 ; Form a mask for the one we are loading | |
5566 | lwz r7,sgcVSID+4(r14) ; And get the VSID bottom | |
5567 | ||
5568 | andc r11,r11,r2 ; Clear the bit | |
5569 | lwz r6,sgcVSID(r14) ; And get the VSID top | |
5570 | ||
5571 | rlwinm r2,r5,4,28,31 ; Change the segment number to a number | |
5572 | ||
5573 | xor r7,r7,r30 ; Modify the key before we actually set it | |
5574 | srw r0,r0,r2 ; Get a mask for the SR we are loading | |
5575 | rlwinm r8,r7,19,1,3 ; Insert the keys and N bit | |
5576 | or r17,r17,r0 ; Remember the segment | |
5577 | rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID | |
5578 | rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents | |
5579 | ||
5580 | mtsrin r8,r5 ; Load the segment | |
5581 | b ssg32Enter ; Go enter the next... | |
5582 | ||
5583 | .align 5 | |
5584 | ||
5585 | ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags | |
5586 | stw r15,pmapCCtl(r28) ; Unlock the segment cache controls | |
5587 | ||
5588 | lis r0,0x8000 ; Get set for a mask | |
5589 | li r2,invalSpace ; Set the invalid address space VSID | |
5590 | ||
5591 | nop ; Align loop | |
5592 | nop ; Align loop | |
5593 | andc r16,r16,r17 ; Get list of SRs that were valid before but not now | |
5594 | nop ; Align loop | |
5595 | ||
5596 | ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate | |
5597 | cmplwi r18,16 ; Have we finished? | |
5598 | srw r22,r0,r18 ; Get the mask bit | |
5599 | rlwinm r23,r18,28,0,3 ; Get the segment register we need | |
5600 | andc r16,r16,r22 ; Get rid of the guy we just did | |
5601 | bge ssg32Really ; Yes, we are really done now... | |
5602 | ||
5603 | mtsrin r2,r23 ; Invalidate the SR | |
5604 | b ssg32Inval ; Do the next... | |
5605 | ||
5606 | .align 5 | |
5607 | ||
5608 | ssg32Really: | |
5609 | stw r17,validSegs(r29) ; Set the valid SR flags | |
5610 | li r3,1 ; Set kernel/user transition | |
5611 | blr | |
5612 | ||
5613 | ; | |
5614 | ; This is the 64-bit address space switch code. | |
5615 | ; First we blow away all of the SLB entries. | |
5616 | ; Then we walk through the segment cache, | |
5617 | ; loading the SLB. Afterwards, we release the cache lock. | |
5618 | ; | |
5619 | ; Note that because we have to treat SLBE 0 specially, we do not ever use it... | |
5620 | ; It's a performance thing... | |
5621 | ; | |
5622 | |
5623 | .align 5 | |
5624 | ||
5625 | ssg64Enter: cntlzw r12,r11 ; Find the next slot in use |
5626 | cmplwi r12,pmapSegCacheUse ; See if we are done | |
5627 | slwi r14,r12,4 ; Index to the cache slot | |
5628 | srw r16,r0,r12 ; Form a mask for the one we are loading | |
5629 | add r14,r14,r9 ; Point to the entry | |
5630 | andc r11,r11,r16 ; Clear the bit | |
5631 | bge-- ssg64Done ; All done... | |
5632 | ||
5633 | ld r5,sgcESID(r14) ; Get the ESID part | |
5634 | ld r6,sgcVSID(r14) ; And get the VSID part | |
5635 | oris r5,r5,0x0800 ; Turn on the valid bit | |
5636 | or r5,r5,r17 ; Insert the SLB slot | |
5637 | xor r6,r6,r30 ; Modify the key before we actually set it | |
5638 | addi r17,r17,1 ; Bump to the next slot | |
5639 | slbmte r6,r5 ; Make that SLB entry | |
5640 | b ssg64Enter ; Go enter the next... | |
5641 | ||
5642 | .align 5 | |
5643 | ||
5644 | ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls | |
5645 | ||
5646 | eqv r16,r16,r16 ; Load up with all foxes |
5647 | subfic r17,r17,64 ; Get the number of 1 bits we need | |
5648 | ||
5649 | sld r16,r16,r17 ; Get a mask for the used SLB entries | |
5650 | li r3,1 ; Set kernel/user transition | |
5651 | std r16,validSegs(r29) ; Set the valid SR flags | |
5652 | blr |
5653 | ||
5654 | ; |
5655 | ; mapSetUp - this function sets initial state for all mapping functions. | |
5656 | ; We turn off all translations (physical), disable interruptions, and | |
5657 | ; enter 64-bit mode if applicable. | |
5658 | ; | |
5659 | ; We also return the original MSR in r11, the feature flags in R12, | |
5660 | ; and CR6 set up so we can do easy branches for 64-bit | |
5661 | ; hw_clear_maps assumes r10, r9 will not be trashed. | |
5662 | ; |
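;
;			The MSR surgery in rough C (MSR_* masks assumed to follow proc_reg.h;
;			sketch only):
;
;			    uint64_t map_setup_msr(uint64_t msr, int is64bit) {
;			        msr &= ~(MSR_VEC | MSR_FP);           /* off for good */
;			        msr &= ~(MSR_EE | MSR_DR | MSR_IR);   /* physical, no 'rupts */
;			        if (is64bit) msr |= MSR_SF;           /* enter 64-bit mode */
;			        return msr;
;			    }
;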
5663 | ||
5664 | .align 5 | |
5665 | .globl EXT(mapSetUp) | |
5666 | ||
5667 | LEXT(mapSetUp) | |
5668 | ||
5669 | lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask | |
5670 | mfsprg r12,2 ; Get feature flags | |
5671 | ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well | |
5672 | mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6 | |
5673 | mfmsr r11 ; Save the MSR | |
5674 | mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6 | |
5675 | andc r11,r11,r0 ; Clear VEC and FP for good | |
5676 | ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR | |
5677 | li r2,1 ; Prepare for 64 bit | |
5678 | andc r0,r11,r0 ; Clear the rest | |
5679 | bt pfNoMSRirb,msuNoMSR ; No MSR... | |
5680 | bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint) | |
5681 | ||
5682 | mtmsr r0 ; Translation and all off |
5683 | isync ; Toss prefetch | |
5684 | blr ; Return... | |
5685 | ||
5686 | .align 5 | |
5687 | ||
5688 | msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0) | |
5689 | mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR | |
5690 | isync ; synchronize | |
5691 | blr ; Return... | |
5692 | ||
5693 | .align 5 | |
5694 | ||
5695 | msuNoMSR: mr r2,r3 ; Save R3 across call | |
5696 | mr r3,r0 ; Get the new MSR value | |
5697 | li r0,loadMSR ; Get the MSR setter SC | |
5698 | sc ; Set it | |
5699 | mr r3,r2 ; Restore R3 | |
5700 | blr ; Go back all set up... | |
5701 | ||
5702 | ||
5703 | ; |
5704 | ; Guest shadow assist -- remove all guest mappings | |
5705 | ; | |
5706 | ; Remove all mappings for a guest pmap from the shadow hash table. | |
5707 | ; | |
5708 | ; Parameters: | |
5709 | ; r3 : address of pmap, 32-bit kernel virtual address | |
5710 | ; | |
5711 | ; Non-volatile register usage: | |
5712 | ; r24 : host pmap's physical address | |
5713 | ; r25 : VMM extension block's physical address | |
5714 | ; r26 : physent address | |
5715 | ; r27 : guest pmap's space ID number | |
5716 | ; r28 : current hash table page index | |
5717 | ; r29 : guest pmap's physical address | |
5718 | ; r30 : saved msr image | |
5719 | ; r31 : current mapping | |
5720 | ; | |
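;
;			The shape of the walk below, in rough C. Types and helpers here are
;			hypothetical stand-ins (hash_page_base, disconnect_pte,
;			unchain_from_physent, SLOTS_PER_PAGE) for the real structures:
;
;			    typedef struct { unsigned mpFlags; unsigned short mpSpace; } map_t;
;
;			    void rem_all_model(unsigned guest_space) {
;			        for (unsigned pg = 0; pg < GV_HPAGES; pg++) {
;			            map_t *m = hash_page_base(pg);
;			            for (unsigned s = 0; s < SLOTS_PER_PAGE; s++, m++) {
;			                if ((m->mpFlags & mpgFree) || m->mpSpace != guest_space)
;			                    continue;                  /* not one of ours */
;			                if (!(m->mpFlags & mpgDormant))
;			                    disconnect_pte(m);         /* mapInvPte32/64 path */
;			                unchain_from_physent(m);       /* graRemove* paths */
;			                m->mpFlags = (m->mpFlags & ~mpgFlags) | mpgFree;
;			            }
;			        }
;			    }
;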
5721 | .align 5 | |
5722 | .globl EXT(hw_rem_all_gv) | |
5723 | ||
5724 | LEXT(hw_rem_all_gv) | |
5725 | ||
5726 | #define graStackSize ((31-24+1)*4)+4 | |
5727 | stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1) | |
5728 | ; Mint a new stack frame | |
5729 | mflr r0 ; Get caller's return address | |
5730 | mfsprg r11,2 ; Get feature flags | |
5731 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
5732 | stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
5733 | ; Save caller's return address | |
5734 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
5735 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
5736 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
5737 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
5738 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
5739 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
5740 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
5741 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
5742 | ||
5743 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
5744 | ||
5745 | bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine | |
5746 | lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr | |
5747 | lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt | |
5748 | lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr | |
5749 | b graStart ; Get to it | |
5750 | gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr | |
5751 | ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt | |
5752 | ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr | |
5753 | graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode | |
5754 | xor r29,r3,r9 ; Convert pmap_t virt->real | |
5755 | mr r30,r11 ; Save caller's msr image | |
5756 | ||
5757 | la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
5758 | bl sxlkExclusive ; Get lock exclusive | |
5759 | ||
5760 | lwz r3,vxsGra(r25) ; Get remove all count | |
5761 | addi r3,r3,1 ; Increment remove all count | |
5762 | stw r3,vxsGra(r25) ; Update remove all count | |
5763 | ||
5764 | li r28,0 ; r28 <- first hash page table index to search | |
5765 | lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number | |
5766 | graPgLoop: | |
5767 | la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index | |
5768 | rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK | |
5769 | ; Convert page index into page physical index offset | |
5770 | add r31,r31,r11 ; Calculate page physical index entry address | |
5771 | bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit | |
5772 | lwz r31,4(r31) ; r31 <- first slot in hash table page to examine | |
5773 | b graLoop ; Examine all slots in this page | |
5774 | gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine | |
5775 | b graLoop ; Examine all slots in this page | |
5776 | ||
5777 | .align 5 | |
5778 | graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags | |
5779 | lhz r4,mpSpace(r31) ; Get mapping's space ID number | |
5780 | rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag | |
5781 | xor r4,r4,r27 ; Compare space ID number | |
5782 | or. r0,r6,r4 ; cr0_eq <- !free && space id match | |
5783 | bne graMiss ; Not one of ours, skip it | |
5784 | ||
5785 | lwz r11,vxsGraHits(r25) ; Get remove hit count | |
5786 | addi r11,r11,1 ; Increment remove hit count | |
5787 | stw r11,vxsGraHits(r25) ; Update remove hit count | |
5788 | ||
5789 | rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant? | |
5790 | bne graRemPhys ; Yes, nothing to disconnect | |
5791 | ||
5792 | lwz r11,vxsGraActive(r25) ; Get remove active count | |
5793 | addi r11,r11,1 ; Increment remove active count | |
5794 | stw r11,vxsGraActive(r25) ; Update remove active count | |
5795 | ||
5796 | bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately | |
5797 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
5798 | ; r31 <- mapping's physical address | |
5799 | ; r3 -> PTE slot physical address | |
5800 | ; r4 -> High-order 32 bits of PTE | |
5801 | ; r5 -> Low-order 32 bits of PTE | |
5802 | ; r6 -> PCA | |
5803 | ; r7 -> PCA physical address | |
5804 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
5805 | b graFreePTE ; Join 64-bit path to release the PTE | |
5806 | graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
5807 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
5808 | graFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
5809 | beq- graRemPhys ; No valid PTE, we're almost done | |
5810 | lis r0,0x8000 ; Prepare free bit for this slot | |
5811 | srw r0,r0,r2 ; Position free bit | |
5812 | or r6,r6,r0 ; Set it in our PCA image | |
5813 | lwz r8,mpPte(r31) ; Get PTE pointer | |
5814 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
5815 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
5816 | eieio ; Synchronize all previous updates (mapInvPtexx doesn't) | |
5817 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
5818 | ||
5819 | graRemPhys: | |
5820 | lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number | |
5821 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
5822 | mr. r26,r3 ; Got lock on our physent? | |
5823 | beq-- graBadPLock ; No, time to bail out | |
5824 | ||
5825 | crset cr1_eq ; cr1_eq <- previous link is the anchor | |
5826 | bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine | |
5827 | la r11,ppLink+4(r26) ; Point to chain anchor | |
5828 | lwz r9,ppLink+4(r26) ; Get chain anchor | |
5829 | rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer | |
5830 | ||
5831 | graRemLoop: beq- graRemoveMiss ; End of chain, this is not good | |
5832 | cmplw r9,r31 ; Is this the mapping to remove? | |
5833 | lwz r8,mpAlias+4(r9) ; Get forward chain pointer | |
5834 | bne graRemNext ; No, chain onward | |
5835 | bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor | |
5836 | stw r8,0(r11) ; Unchain gpv->phys mapping | |
5837 | b graRemoved ; Exit loop | |
5838 | graRemRetry: | |
5839 | lwarx r0,0,r11 ; Get previous link | |
5840 | rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags | |
5841 | stwcx. r0,0,r11 ; Update previous link | |
5842 | bne- graRemRetry ; Lost reservation, retry | |
5843 | b graRemoved ; Good work, let's get outta here | |
5844 | ||
5845 | graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link | |
5846 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
5847 | mr. r9,r8 ; Does next entry exist? | |
5848 | b graRemLoop ; Carry on | |
5849 | ||
5850 | graRemove64: | |
5851 | li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
5852 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
5853 | la r11,ppLink(r26) ; Point to chain anchor | |
5854 | ld r9,ppLink(r26) ; Get chain anchor | |
5855 | andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer | |
5856 | graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good | |
5857 | cmpld r9,r31 ; Is this the mapping to remove? | |
5858 | ld r8,mpAlias(r9) ; Get forward chain pointer | |
5859 | bne graRem64Nxt ; Not mapping to remove, chain on, dude | |
5860 | bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor | |
5861 | std r8,0(r11) ; Unchain gpv->phys mapping | |
5862 | b graRemoved ; Exit loop | |
5863 | graRem64Rt: ldarx r0,0,r11 ; Get previous link | |
5864 | and r0,r0,r7 ; Get flags | |
5865 | or r0,r0,r8 ; Insert new forward pointer | |
5866 | stdcx. r0,0,r11 ; Slam it back in | |
5867 | bne-- graRem64Rt ; Lost reservation, retry | |
5868 | b graRemoved ; Good work, let's go home | |
5869 | ||
5870 | graRem64Nxt: | |
5871 | la r11,mpAlias(r9) ; Point to (soon to be) previous link | |
5872 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
5873 | mr. r9,r8 ; Does next entry exist? | |
5874 | b graRem64Lp ; Carry on | |
5875 | ||
5876 | graRemoved: | |
5877 | mr r3,r26 ; r3 <- physent's address | |
5878 | bl mapPhysUnlock ; Unlock the physent (and its chain of mappings) | |
5879 | ||
5880 | lwz r3,mpFlags(r31) ; Get mapping's flags | |
5881 | rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags | |
5882 | ori r3,r3,mpgFree ; Mark mapping free | |
5883 | stw r3,mpFlags(r31) ; Update flags | |
5884 | ||
5885 | graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping | |
5886 | rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page? | |
5887 | bne graLoop ; No, examine next slot | |
5888 | addi r28,r28,1 ; Increment hash table page index | |
5889 | cmplwi r28,GV_HPAGES ; End of hash table? | |
5890 | bne graPgLoop ; Examine next hash table page | |
5891 | ||
5892 | la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
5893 | bl sxlkUnlock ; Release host pmap's search lock | |
5894 | ||
5895 | bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately | |
5896 | mtmsr r30 ; Restore 'rupts, translation | |
5897 | isync ; Throw a small wrench into the pipeline | |
5898 | b graPopFrame ; Nothing to do now but pop a frame and return | |
5899 | graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode | |
5900 | graPopFrame: | |
5901 | lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
5902 | ; Get caller's return address | |
5903 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
5904 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
5905 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
5906 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
5907 | mtlr r0 ; Prepare return address | |
5908 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
5909 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
5910 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
5911 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
5912 | lwz r1,0(r1) ; Pop stack frame | |
5913 | blr ; Return to caller | |
5914 | ||
5915 | graBadPLock: | |
5916 | graRemoveMiss: | |
5917 | lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the | |
5918 | ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb? | |
5919 | li r3,failMapping ; The BOMB, Dmitri. | |
5920 | sc ; The hydrogen bomb. | |
5921 | ||
5922 | ||
5923 | ; | |
5924 | ; Guest shadow assist -- remove local guest mappings | |
5925 | ; | |
5926 | ; Remove local mappings for a guest pmap from the shadow hash table. | |
5927 | ; | |
5928 | ; Parameters: | |
5929 | ; r3 : address of guest pmap, 32-bit kernel virtual address | |
5930 | ; | |
5931 | ; Non-volatile register usage: | |
5932 | ; r20 : current active map word's physical address | |
5933 | ; r21 : current hash table page address | |
5934 | ; r22 : updated active map word in process | |
5935 | ; r23 : active map word in process | |
5936 | ; r24 : host pmap's physical address | |
5937 | ; r25 : VMM extension block's physical address | |
5938 | ; r26 : physent address | |
5939 | ; r27 : guest pmap's space ID number | |
5940 | ; r28 : current active map index | |
5941 | ; r29 : guest pmap's physical address | |
5942 | ; r30 : saved msr image | |
5943 | ; r31 : current mapping | |
5944 | ; | |
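;
;			Walking the lit bits of an active-map word, as grlLoop below does,
;			looks like this in rough C (visit is a hypothetical callback):
;
;			    #include <stdint.h>
;
;			    void walk_lit_bits(uint32_t word, void (*visit)(int)) {
;			        while (word) {
;			            int bit = __builtin_clz(word);     /* cntlzw; bit 0 is MSB */
;			            word &= ~(0x80000000u >> bit);     /* reset the lit bit */
;			            visit(bit);                        /* handle this slot */
;			        }
;			    }
;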
5945 | .align 5 | |
5946 | .globl EXT(hw_rem_local_gv) | |
5947 | ||
5948 | LEXT(hw_rem_local_gv) | |
5949 | ||
5950 | #define grlStackSize ((31-20+1)*4)+4 | |
5951 | stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1) | |
5952 | ; Mint a new stack frame | |
5953 | mflr r0 ; Get caller's return address | |
5954 | mfsprg r11,2 ; Get feature flags | |
5955 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
5956 | stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
5957 | ; Save caller's return address | |
5958 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
5959 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
5960 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
5961 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
5962 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
5963 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
5964 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
5965 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
5966 | stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23 | |
5967 | stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22 | |
5968 | stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21 | |
5969 | stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20 | |
5970 | ||
5971 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
5972 | ||
5973 | bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine | |
5974 | lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr | |
5975 | lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt | |
5976 | lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr | |
5977 | b grlStart ; Get to it | |
5978 | grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr | |
5979 | ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt | |
5980 | ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr | |
5981 | ||
5982 | grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode | |
5983 | xor r29,r3,r9 ; Convert pmap_t virt->real | |
5984 | mr r30,r11 ; Save caller's msr image | |
5985 | ||
5986 | la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
5987 | bl sxlkExclusive ; Get lock exclusive | |
5988 | ||
5989 | li r28,0 ; r28 <- index of first active map word to search | |
5990 | lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number | |
5991 | b grlMap1st ; Examine first map word | |
5992 | ||
5993 | .align 5 | |
5994 | grlNextMap: stw r22,0(r21) ; Save updated map word | |
5995 | addi r28,r28,1 ; Increment map word index | |
5996 | cmplwi r28,GV_MAP_WORDS ; See if we're done | |
5997 | beq grlDone ; Yup, let's get outta here | |
5998 | ||
5999 | grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array | |
6000 | rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK | |
6001 | ; Convert map index into map index offset | |
6002 | add r20,r20,r11 ; Calculate map array element address | |
6003 | lwz r22,0(r20) ; Get active map word at index | |
6004 | mr. r23,r22 ; Any active mappings indicated? | |
6005 | beq grlNextMap ; Nope, check next word | |
6006 | ||
6007 | la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index | |
6008 | rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK | |
6009 | ; Extract page index from map word index and convert | |
6010 | ; into page physical index offset | |
6011 | add r21,r21,r11 ; Calculate page physical index entry address | |
6012 | bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit | |
6013 | lwz r21,4(r21) ; Get selected hash table page's address | |
6014 | b grlLoop ; Examine all slots in this page | |
6015 | grl64Page: ld r21,0(r21) ; Get selected hash table page's address | |
6016 | b grlLoop ; Examine all slots in this page | |
6017 | ||
6018 | .align 5 | |
6019 | grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word | |
6020 | cmplwi r11,32 ; Any active mappings left in this word? | |
6021 | lis r12,0x8000 ; Prepare mask to reset bit | |
6022 | srw r12,r12,r11 ; Position mask bit | |
6023 | andc r23,r23,r12 ; Reset lit bit | |
6024 | beq grlNextMap ; No bits lit, examine next map word | |
6025 | ||
6026 | slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number | |
6027 | rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK | |
6028 | ; Extract slot band number from index and insert | |
6029 | add r31,r31,r21 ; Add hash page address yielding mapping slot address | |
6030 | ||
6031 | lwz r3,mpFlags(r31) ; Get mapping's flags | |
6032 | lhz r4,mpSpace(r31) ; Get mapping's space ID number | |
6033 | rlwinm r5,r3,0,mpgGlobal ; Extract global bit | |
6034 | xor r4,r4,r27 ; Compare space ID number | |
6035 | or. r4,r4,r5 ; (space id miss || global) | |
6036 | bne grlLoop ; Not one of ours, skip it | |
6037 | andc r22,r22,r12 ; Reset active bit corresponding to this mapping | |
6038 | ori r3,r3,mpgDormant ; Mark entry dormant | |
6039 | stw r3,mpFlags(r31) ; Update mapping's flags | |
6040 | ||
6041 | bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately | |
6042 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6043 | ; r31 <- mapping's physical address | |
6044 | ; r3 -> PTE slot physical address | |
6045 | ; r4 -> High-order 32 bits of PTE | |
6046 | ; r5 -> Low-order 32 bits of PTE | |
6047 | ; r6 -> PCA | |
6048 | ; r7 -> PCA physical address | |
6049 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6050 | b grlFreePTE ; Join 64-bit path to release the PTE | |
6051 | grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
6052 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
6053 | grlFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
6054 | beq- grlLoop ; No valid PTE, we're done with this mapping | |
6055 | lis r0,0x8000 ; Prepare free bit for this slot | |
6056 | srw r0,r0,r2 ; Position free bit | |
6057 | or r6,r6,r0 ; Set it in our PCA image | |
6058 | lwz r8,mpPte(r31) ; Get PTE pointer | |
6059 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
6060 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
6061 | eieio ; Synchronize all previous updates (mapInvPtexx doesn't) | |
6062 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
6063 | b grlLoop ; On to next active mapping in this map word | |
6064 | ||
6065 | grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock | |
6066 | bl sxlkUnlock ; Release host pmap's search lock | |
6067 | ||
6068 | bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately | |
6069 | mtmsr r30 ; Restore 'rupts, translation | |
6070 | isync ; Throw a small wrench into the pipeline | |
6071 | b grlPopFrame ; Nothing to do now but pop a frame and return | |
6072 | grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode | |
6073 | grlPopFrame: | |
6074 | lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6075 | ; Get caller's return address | |
6076 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
6077 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
6078 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
6079 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
6080 | mtlr r0 ; Prepare return address | |
6081 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
6082 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
6083 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
6084 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
6085 | lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23 | |
6086 | lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22 | |
6087 | lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21 | |
6088 | lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20 | |
6089 | lwz r1,0(r1) ; Pop stack frame | |
6090 | blr ; Return to caller | |
6091 | ||
6092 | ||
6093 | ; | |
6094 | ; Guest shadow assist -- resume a guest mapping | |
6095 | ; | |
6096 | ; Locates the specified dormant mapping, and if it exists validates it and makes it | |
6097 | ; active. | |
6098 | ; | |
6099 | ; Parameters: | |
6100 | ; r3 : address of host pmap, 32-bit kernel virtual address | |
6101 | ; r4 : address of guest pmap, 32-bit kernel virtual address | |
6102 | ; r5 : host virtual address, high-order 32 bits | |
6103 | ; r6 : host virtual address, low-order 32 bits | |
6104 | ; r7 : guest virtual address, high-order 32 bits | |
6105 | ; r8 : guest virtual address, low-order 32 bits | |
6106 | ; r9 : guest mapping protection code | |
6107 | ; | |
6108 | ; Non-volatile register usage: | |
6109 | ; r23 : VMM extension block's physical address | |
6110 | ; r24 : physent physical address | |
6111 | ; r25 : caller's msr image from mapSetUp | |
6112 | ; r26 : guest mapping protection code | |
6113 | ; r27 : host pmap physical address | |
6114 | ; r28 : guest pmap physical address | |
6115 | ; r29 : host virtual address | |
6116 | ; r30 : guest virtual address | |
6117 | ; r31 : gva->phys mapping's physical address | |
6118 | ; | |
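;
;			The shadow hash formed below, in rough C (sketch only):
;
;			    #include <stdint.h>
;
;			    uint32_t gv_hash(uint32_t space_id, uint64_t gva) {
;			        return space_id ^ (uint32_t)(gva >> 12);  /* spaceID ^ (va >> 12) */
;			    }
;
;			The result is then split into a hash page index and a group offset
;			within that page.
;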
6119 | .align 5 | |
6120 | .globl EXT(hw_res_map_gv) | |
6121 | ||
6122 | LEXT(hw_res_map_gv) | |
6123 | ||
6124 | #define grsStackSize ((31-23+1)*4)+4 | |
6125 | ||
6126 | stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1) | |
6127 | ; Mint a new stack frame | |
6128 | mflr r0 ; Get caller's return address | |
6129 | mfsprg r11,2 ; Get feature flags | |
6130 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
6131 | stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6132 | ; Save caller's return address | |
6133 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
6134 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
6135 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
6136 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
6137 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
6138 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
6139 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
6140 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
6141 | stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23 | |
6142 | ||
6143 | rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr | |
6144 | rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr | |
6145 | mr r26,r9 ; Copy guest mapping protection code | |
6146 | ||
6147 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
6148 | lwz r9,pmapSpace(r4) ; r9 <- guest space ID number | |
6149 | bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately | |
6150 | lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr | |
6151 | lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt | |
6152 | lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt | |
6153 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6154 | srwi r11,r30,12 ; Form shadow hash: | |
6155 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6156 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6157 | ; Form index offset from hash page number | |
6158 | add r31,r31,r10 ; r31 <- hash page index entry | |
6159 | lwz r31,4(r31) ; r31 <- hash page paddr | |
6160 | rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK | |
6161 | ; r31 <- hash group paddr | |
6162 | b grsStart ; Get to it | |
6163 | ||
6164 | grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr | |
6165 | rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr | |
6166 | ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr | |
6167 | ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt | |
6168 | ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt | |
6169 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6170 | srwi r11,r30,12 ; Form shadow hash: | |
6171 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6172 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6173 | ; Form index offset from hash page number | |
6174 | add r31,r31,r10 ; r31 <- hash page index entry | |
6175 | ld r31,0(r31) ; r31 <- hash page paddr | |
6176 | insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
6177 | ; r31 <- hash group paddr | |
6178 | ||
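;		Illustrative sketch (not part of the build): the shadow-hash lookup
;		formed above, in C. rotl32, hashPageIndexBase, and the local names
;		are illustrative only; GV_HPAGE_* and GV_HGRP_* are this file's
;		constants, and the rlwinm/rlwimi/insrdi steps are rotate-and-mask
;		operations.
;
;			static inline uint32_t rotl32(uint32_t x, int s) {
;			    return (x << s) | (x >> ((32 - s) & 31));
;			}
;			uint32_t hash     = (uint32_t)(gva >> 12) ^ spaceID;   // spaceID ^ page index
;			uint32_t pageOff  = rotl32(hash, GV_HPAGE_SHIFT) & GV_HPAGE_MASK;
;			uint64_t hashPage = *(uint64_t *)(hashPageIndexBase + pageOff);
;			uint64_t group    = hashPage | (rotl32(hash, GV_HGRP_SHIFT) & GV_HGRP_MASK);
;			// 'group' is the physical address of the hash group searched below.
;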
6179 | grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real | |
6180 | xor r28,r4,r28 ; Convert guest pmap_t virt->real | |
6181 | bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode | |
6182 | mr r25,r11 ; Save caller's msr image | |
6183 | ||
6184 | la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address | |
6185 | bl sxlkExclusive ; Get lock exclusive | |
6186 | ||
6187 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6188 | mtctr r0 ; in this group | |
6189 | bt++ pf64Bitb,grs64Search ; Test for 64-bit machine | |
6190 | ||
6191 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6192 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6193 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
6194 | b grs32SrchLp ; Let the search begin! | |
6195 | ||
6196 | .align 5 | |
6197 | grs32SrchLp: | |
6198 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6199 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6200 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6201 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6202 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6203 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
6204 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6205 | xor r7,r7,r9 ; Compare space ID | |
6206 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6207 | xor r8,r8,r30 ; Compare virtual address | |
6208 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6209 | beq grsSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6210 | ||
6211 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6212 | bdnz grs32SrchLp ; Iterate | |
6213 | ||
6214 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6215 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
6216 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6217 | xor r4,r4,r9 ; Compare space ID | |
6218 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6219 | xor r5,r5,r30 ; Compare virtual address | |
6220 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6221 | beq grsSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6222 | b grsSrchMiss ; No joy in our hash group | |
6223 | ||
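;		Illustrative sketch (not part of the build): the hit test applied to
;		each of the GV_SLOTS mappings in the hash group. The assembly
;		software-pipelines the loop (the next slot's fields load while the
;		current slot is tested); the predicate itself, with 'mp' as an
;		illustrative slot pointer, reduces to:
;
;			int hit = !(mp->mpFlags & mpgFree)                  // slot in use
;			       && (mp->mpSpace == guestSpaceID)             // space ID matches
;			       && ((mp->mpVAddr & ~0xFFFULL) == guestVA);   // page address matches
;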
6224 | grs64Search: | |
6225 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6226 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6227 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
6228 | b grs64SrchLp ; Let the search begin! | |
6229 | ||
6230 | .align 5 | |
6231 | grs64SrchLp: | |
6232 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6233 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6234 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6235 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6236 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6237 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
6238 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6239 | xor r7,r7,r9 ; Compare space ID | |
6240 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6241 | xor r8,r8,r30 ; Compare virtual address | |
6242 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6243 | beq grsSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6244 | ||
6245 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6246 | bdnz grs64SrchLp ; Iterate | |
6247 | ||
6248 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6249 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
6250 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6251 | xor r4,r4,r9 ; Compare space ID | |
6252 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6253 | xor r5,r5,r30 ; Compare virtual address | |
6254 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6255 | bne grsSrchMiss ; No joy in our hash group | |
6256 | ||
6257 | grsSrchHit: | |
6258 | rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant? | |
6259 | bne grsFindHost ; Yes, nothing to disconnect | |
6260 | ||
6261 | bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately | |
6262 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6263 | ; r31 <- mapping's physical address | |
6264 | ; r3 -> PTE slot physical address | |
6265 | ; r4 -> High-order 32 bits of PTE | |
6266 | ; r5 -> Low-order 32 bits of PTE | |
6267 | ; r6 -> PCA | |
6268 | ; r7 -> PCA physical address | |
6269 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6270 | b grsFreePTE ; Join 64-bit path to release the PTE | |
6271 | grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
6272 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
6273 | grsFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
6274 | beq- grsFindHost ; No valid PTE, we're almost done | |
6275 | lis r0,0x8000 ; Prepare free bit for this slot | |
6276 | srw r0,r0,r2 ; Position free bit | |
6277 | or r6,r6,r0 ; Set it in our PCA image | |
6278 | lwz r8,mpPte(r31) ; Get PTE pointer | |
6279 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
6280 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
6281 | eieio ; Synchronize all previous updates (mapInvPtexx didn't) | |
6282 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
6283 | ||
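;		Illustrative sketch (not part of the build): releasing the PTE slot
;		freed above. 'slot' is the PTE's index within its PTEG; the PCA keeps
;		one free bit per slot, numbered from the high-order end, and storing
;		the PCA image also unlocks the PTEG. eieio() stands in for the
;		barrier instruction.
;
;			pca |= 0x80000000u >> slot;     // mark this PTEG slot free
;			mp->mpPte &= ~mpHValid;         // mapping no longer has a live PTE
;			eieio();                        // order the updates, then...
;			*pcaPhys = pca;                 // ...store PCA, unlocking the PTEG
;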
6284 | grsFindHost: | |
6285 | ||
6286 | // We now have a dormant guest mapping that matches our space id and virtual address. Our next | |
6287 | // step is to locate the host mapping that completes the guest mapping's connection to a physical | |
6288 | // frame. The guest and host mappings must connect to the same physical frame, so they must both | |
6289 | // be chained on the same physent. We search the physent chain for a host mapping matching our | |
6290 | // host's space id and the host virtual address. If we succeed, we know that the entire chain | |
6291 | // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be | |
6292 | // resumed. If we fail to find the specified host virtual->physical mapping, it is because the | |
6293 | // host virtual or physical address has changed since the guest mapping was suspended, so it | |
6294 | // is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell | |
6295 | // our caller that it will have to take its long path, translating the host virtual address | |
6296 | // through the host's skiplist and installing a new guest mapping. | |
6297 | ||
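//		Illustrative sketch (not part of the build): the physent alias-chain
//		walk performed below, in C. The field names are the ones used in this
//		file; 'mp' and the pointer plumbing are simplified for illustration.
//
//			for (mp = first_mapping(physent); mp != NULL; mp = mp->mpAlias) {
//			    if ((mp->mpFlags & mpType) == mpNormal &&
//			        mp->mpSpace == hostSpaceID &&
//			        (mp->mpVAddr & ~mpHWFlags) == hostVA)
//			        break;          // hit: hva->phys mapping is still intact
//			}
//			// mp == NULL means the host mapping changed while the guest
//			// mapping was suspended: delete the guest mapping and let the
//			// caller take its long path.
//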
6298 | lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number | |
6299 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
6300 | mr. r24,r3 ; Got lock on our physent? | |
6301 | beq-- grsBadPLock ; No, time to bail out | |
6302 | ||
6303 | bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search | |
6304 | ||
6305 | lwz r9,ppLink+4(r24) ; Get first mapping on physent | |
6306 | lwz r6,pmapSpace(r27) ; Get host pmap's space id number | |
6307 | rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags | |
6308 | grsPELoop: mr. r12,r9 ; Got a mapping to look at? | |
6309 | beq- grsPEMiss ; Nope, we've missed hva->phys mapping | |
6310 | lwz r7,mpFlags(r12) ; Get mapping's flags | |
6311 | lhz r4,mpSpace(r12) ; Get mapping's space id number | |
6312 | lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address | |
6313 | lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain | |
6314 | ||
6315 | rlwinm r0,r7,0,mpType ; Isolate mapping's type | |
6316 | rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags | |
6317 | xori r0,r0,mpNormal ; Normal mapping? | |
6318 | xor r4,r4,r6 ; Compare w/ host space id number | |
6319 | xor r5,r5,r29 ; Compare w/ host virtual address | |
6320 | or r0,r0,r4 ; r0 <- (wrong type || !space id) | |
6321 | or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit) | |
6322 | beq grsPEHit ; Hit | |
6323 | b grsPELoop ; Iterate | |
6324 | ||
6325 | grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer | |
6326 | rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6327 | ld r9,ppLink(r24) ; Get first mapping on physent | |
6328 | lwz r6,pmapSpace(r27) ; Get pmap's space id number | |
6329 | andc r9,r9,r0 ; Cleanup mapping pointer | |
6330 | grsPELp64: mr. r12,r9 ; Got a mapping to look at? | |
6331 | beq-- grsPEMiss ; Nope, we've missed hva->phys mapping | |
6332 | lwz r7,mpFlags(r12) ; Get mapping's flags | |
6333 | lhz r4,mpSpace(r12) ; Get mapping's space id number | |
6334 | ld r5,mpVAddr(r12) ; Get mapping's virtual address | |
6335 | ld r9,mpAlias(r12) ; Next mapping in physent alias chain | |
6336 | rlwinm r0,r7,0,mpType ; Isolate mapping's type | |
6337 | rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags | |
6338 | xori r0,r0,mpNormal ; Normal mapping? | |
6339 | xor r4,r4,r6 ; Compare w/ host space id number | |
6340 | xor r5,r5,r29 ; Compare w/ host virtual address | |
6341 | or r0,r0,r4 ; r0 <- (wrong type || !space id) | |
6342 | or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit) | |
6343 | beq grsPEHit ; Hit | |
6344 | b grsPELp64 ; Iterate | |
6345 | ||
6346 | grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits | |
6347 | rlwimi r0,r26,0,mpPP ; Insert new protection bits | |
6348 | stw r0,mpVAddr+4(r31) ; Write 'em back | |
6349 | ||
6350 | eieio ; Ensure previous mapping updates are visible | |
6351 | lwz r0,mpFlags(r31) ; Get flags | |
6352 | rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag | |
6353 | stw r0,mpFlags(r31) ; Set updated flags, entry is now valid | |
6354 | ||
6355 | li r31,mapRtOK ; Indicate success | |
6356 | b grsRelPhy ; Exit through physent lock release | |
6357 | ||
6358 | grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor | |
6359 | bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine | |
6360 | la r11,ppLink+4(r24) ; Point to chain anchor | |
6361 | lwz r9,ppLink+4(r24) ; Get chain anchor | |
6362 | rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer | |
6363 | grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good | |
6364 | cmplw r9,r31 ; Is this the mapping to remove? | |
6365 | lwz r8,mpAlias+4(r9) ; Get forward chain pointer | |
6366 | bne grsRemNext ; No, chain onward | |
6367 | bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor | |
6368 | stw r8,0(r11) ; Unchain gva->phys mapping | |
6369 | b grsDelete ; Finish deleting mapping | |
6370 | grsRemRetry: | |
6371 | lwarx r0,0,r11 ; Get previous link | |
6372 | rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags | |
6373 | stwcx. r0,0,r11 ; Update previous link | |
6374 | bne- grsRemRetry ; Lost reservation, retry | |
6375 | b grsDelete ; Finish deleting mapping | |
6376 | ||
6377 | .align 5 | |
6378 | grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link | |
6379 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6380 | mr. r9,r8 ; Does next entry exist? | |
6381 | b grsRemLoop ; Carry on | |
6382 | ||
6383 | grsRemove64: | |
6384 | li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
6385 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6386 | la r11,ppLink(r24) ; Point to chain anchor | |
6387 | ld r9,ppLink(r24) ; Get chain anchor | |
6388 | andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer | |
6389 | grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good | |
6390 | cmpld r9,r31 ; Is this the mapping to remove? | |
6391 | ld r8,mpAlias(r9) ; Get forward chain pointer | |
6392 | bne grsRem64Nxt ; Not mapping to remove, chain on, dude | |
6393 | bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor | |
6394 | std r8,0(r11) ; Unchain gva->phys mapping | |
6395 | b grsDelete ; Finish deleting mapping | |
6396 | grsRem64Rt: ldarx r0,0,r11 ; Get previous link | |
6397 | and r0,r0,r7 ; Get flags | |
6398 | or r0,r0,r8 ; Insert new forward pointer | |
6399 | stdcx. r0,0,r11 ; Slam it back in | |
6400 | bne-- grsRem64Rt ; Lost reservation, retry | |
6401 | b grsDelete ; Finish deleting mapping | |
6402 | ||
6403 | .align 5 | |
6404 | grsRem64Nxt: | |
6405 | la r11,mpAlias(r9) ; Point to (soon to be) previous link | |
6406 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6407 | mr. r9,r8 ; Does next entry exist? | |
6408 | b grsRem64Lp ; Carry on | |
6409 | ||
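;		Illustrative sketch (not part of the build): the unchain step above.
;		Only the chain anchor in the physent carries flag bits beside the
;		pointer, so it is updated with a load-reserve/store-conditional retry
;		loop (lwarx/stwcx. or ldarx/stdcx.); a link in a mapping's mpAlias
;		field takes a plain store, since the pmap search lock serializes
;		writers. load_reserved/store_conditional are illustrative helpers.
;
;			do {                                        // anchor case only
;			    old = load_reserved(anchor);
;			    new = (old & flagBits) | forward;       // keep flags, swap pointer
;			} while (!store_conditional(anchor, new)); // retry if reservation lost
;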
6410 | grsDelete: | |
6411 | lwz r3,mpFlags(r31) ; Get mapping's flags | |
6412 | rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags | |
6413 | ori r3,r3,mpgFree ; Mark mapping free | |
6414 | stw r3,mpFlags(r31) ; Update flags | |
6415 | ||
6416 | li r31,mapRtNotFnd ; Didn't succeed | |
6417 | ||
6418 | grsRelPhy: mr r3,r24 ; r3 <- physent addr | |
6419 | bl mapPhysUnlock ; Unlock physent chain | |
6420 | ||
6421 | grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr | |
6422 | bl sxlkUnlock ; Release host pmap search lock | |
6423 | ||
6424 | grsRtn: mr r3,r31 ; r3 <- result code | |
6425 | bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately | |
6426 | mtmsr r25 ; Restore 'rupts, translation | |
6427 | isync ; Throw a small wrench into the pipeline | |
6428 | b grsPopFrame ; Nothing to do now but pop a frame and return | |
6429 | grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode | |
6430 | grsPopFrame: | |
6431 | lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6432 | ; Get caller's return address | |
6433 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
6434 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
6435 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
6436 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
6437 | mtlr r0 ; Prepare return address | |
6438 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
6439 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
6440 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
6441 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
6442 | lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23 | |
6443 | lwz r1,0(r1) ; Pop stack frame | |
6444 | blr ; Return to caller | |
6445 | ||
6446 | .align 5 | |
6447 | grsSrchMiss: | |
6448 | li r31,mapRtNotFnd ; Could not locate requested mapping | |
6449 | b grsRelPmap ; Exit through host pmap search lock release | |
6450 | ||
6451 | grsBadPLock: | |
6452 | grsPEMissMiss: | |
6453 | lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the | |
6454 | ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb? | |
6455 | li r3,failMapping ; The BOMB, Dmitri. | |
6456 | sc ; The hydrogen bomb. | |
6457 | ||
6458 | ||
6459 | ; | |
6460 | ; Guest shadow assist -- add a guest mapping | |
6461 | ; | |
6462 | ; Adds a guest mapping. | |
6463 | ; | |
6464 | ; Parameters: | |
6465 | ; r3 : address of host pmap, 32-bit kernel virtual address | |
6466 | ; r4 : address of guest pmap, 32-bit kernel virtual address | |
6467 | ; r5 : guest virtual address, high-order 32 bits | |
6468 | ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags) | |
6469 | ; r7 : new mapping's flags | |
6470 | ; r8 : physical address, 32-bit page number | |
6471 | ; | |
6472 | ; Non-volatile register usage: | |
6473 | ; r22 : hash group's physical address | |
6474 | ; r23 : VMM extension block's physical address | |
6475 | ; r24 : mapping's flags | |
6476 | ; r25 : caller's msr image from mapSetUp | |
6477 | ; r26 : physent physical address | |
6478 | ; r27 : host pmap physical address | |
6479 | ; r28 : guest pmap physical address | |
6480 | ; r29 : physical address, 32-bit 4k-page number | |
6481 | ; r30 : guest virtual address | |
6482 | ; r31 : gva->phys mapping's physical address | |
6483 | ; | |
6484 | ||
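;		Illustrative sketch (not part of the build): a C-level view of this
;		routine's interface, reconstructed from the register conventions
;		above. The prototype is an assumption; ppnum_t follows the kernel's
;		use of 32-bit physical page numbers.
;
;			extern void hw_add_map_gv(pmap_t host_pmap, pmap_t guest_pmap,
;			                          addr64_t guest_va,   // low word carries mpHWFlags
;			                          unsigned int flags, ppnum_t phys_page);
;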
6485 | .align 5 | |
6486 | .globl EXT(hw_add_map_gv) | |
6487 | ||
6488 | ||
6489 | LEXT(hw_add_map_gv) | |
6490 | ||
6491 | #define gadStackSize ((31-22+1)*4)+4 | |
6492 | ||
6493 | stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1) | |
6494 | ; Mint a new stack frame | |
6495 | mflr r0 ; Get caller's return address | |
6496 | mfsprg r11,2 ; Get feature flags | |
6497 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
6498 | stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6499 | ; Save caller's return address | |
6500 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
6501 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
6502 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
6503 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
6504 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
6505 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
6506 | stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25 | |
6507 | stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24 | |
6508 | stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23 | |
6509 | stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22 | |
6510 | ||
6511 | rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr | |
6512 | rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr | |
6513 | mr r24,r7 ; Copy guest mapping's flags | |
6514 | mr r29,r8 ; Copy target frame's physical address | |
6515 | ||
6516 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
6517 | lwz r9,pmapSpace(r4) ; r9 <- guest space ID number | |
6518 | bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine | |
6519 | lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr | |
6520 | lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt | |
6521 | lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt | |
6522 | la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index | |
6523 | srwi r11,r30,12 ; Form shadow hash: | |
6524 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6525 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6526 | ; Form index offset from hash page number | |
6527 | add r22,r22,r10 ; r22 <- hash page index entry | |
6528 | lwz r22,4(r22) ; r22 <- hash page paddr | |
6529 | rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK | |
6530 | ; r22 <- hash group paddr | |
6531 | b gadStart ; Get to it | |
6532 | ||
6533 | gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr | |
6534 | ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt | |
6535 | ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt | |
6536 | la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index | |
6537 | srwi r11,r30,12 ; Form shadow hash: | |
6538 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6539 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6540 | ; Form index offset from hash page number | |
6541 | add r22,r22,r10 ; r22 <- hash page index entry | |
6542 | ld r22,0(r22) ; r22 <- hash page paddr | |
6543 | insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
6544 | ; r22 <- hash group paddr | |
6545 | ||
6546 | gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real | |
6547 | xor r28,r4,r28 ; Convert guest pmap_t virt->real | |
6548 | bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode | |
6549 | mr r25,r11 ; Save caller's msr image | |
6550 | ||
6551 | la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address | |
6552 | bl sxlkExclusive ; Get lock exclusive | |
6553 | ||
6554 | mr r31,r22 ; Prepare to search this group | |
6555 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6556 | mtctr r0 ; in this group | |
6557 | bt++ pf64Bitb,gad64Search ; Test for 64-bit machine | |
6558 | ||
6559 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6560 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6561 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
6562 | clrrwi r12,r30,12 ; r12 <- virtual address we're searching for | |
6563 | b gad32SrchLp ; Let the search begin! | |
6564 | ||
6565 | .align 5 | |
6566 | gad32SrchLp: | |
6567 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6568 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6569 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6570 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6571 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6572 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
6573 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6574 | xor r7,r7,r9 ; Compare space ID | |
6575 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6576 | xor r8,r8,r12 ; Compare virtual address | |
6577 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6578 | beq gadRelPmap ; Hit, let upper-level redrive sort it out (r31 points to guest mapping) | |
6579 | ||
6580 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6581 | bdnz gad32SrchLp ; Iterate | |
6582 | ||
6583 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6584 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
6585 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6586 | xor r4,r4,r9 ; Compare space ID | |
6587 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6588 | xor r5,r5,r12 ; Compare virtual address | |
6589 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6590 | beq gadRelPmap ; Hit, let upper-level redrive sort it out (r31 points to guest mapping) | |
6591 | b gadScan ; No joy in our hash group | |
6592 | ||
6593 | gad64Search: | |
6594 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6595 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6596 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
6597 | clrrdi r12,r30,12 ; r12 <- virtual address we're searching for | |
6598 | b gad64SrchLp ; Let the search begin! | |
6599 | ||
6600 | .align 5 | |
6601 | gad64SrchLp: | |
6602 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6603 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6604 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6605 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6606 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6607 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
6608 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6609 | xor r7,r7,r9 ; Compare space ID | |
6610 | or r0,r11,r7 ; r0 <- !(!free && space match) | |
6611 | xor r8,r8,r12 ; Compare virtual address | |
6612 | or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match | |
6613 | beq gadRelPmap ; Hit, let upper-level redrive sort it out | |
6614 | ||
6615 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6616 | bdnz gad64SrchLp ; Iterate | |
6617 | ||
6618 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6619 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
6620 | rlwinm r11,r6,0,mpgFree ; Isolate guest free flag | |
6621 | xor r4,r4,r9 ; Compare space ID | |
6622 | or r0,r11,r4 ; r0 <- !(!free && space match) | |
6623 | xor r5,r5,r12 ; Compare virtual address | |
6624 | or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match | |
6625 | bne gadScan ; No joy in our hash group | |
6626 | b gadRelPmap ; Hit, let upper-level redrive sort it out | |
6627 | ||
6628 | gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor | |
6629 | rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2) | |
6630 | ; Prepare to address slot at cursor | |
6631 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6632 | mtctr r0 ; in this group | |
6633 | or r2,r22,r12 ; r2 <- 1st mapping to search | |
6634 | lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags | |
6635 | li r11,0 ; No dormant entries found yet | |
6636 | b gadScanLoop ; Let the search begin! | |
6637 | ||
6638 | .align 5 | |
6639 | gadScanLoop: | |
6640 | addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search | |
6641 | rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2) | |
6642 | ; Trim off any carry, wrapping into slot number range | |
6643 | mr r31,r2 ; r31 <- current mapping's address | |
6644 | or r2,r22,r12 ; r2 <- next mapping to search | |
6645 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6646 | lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags | |
6647 | rlwinm. r0,r6,0,mpgFree ; Test free flag | |
6648 | bne gadFillMap ; Join common path on hit (r31 points to free mapping) | |
6649 | rlwinm r0,r6,0,mpgDormant ; Dormant entry? | |
6650 | xori r0,r0,mpgDormant ; Invert dormant flag | |
6651 | or. r0,r0,r11 ; Skip all but the first dormant entry we see | |
6652 | bne gadNotDorm ; Not dormant or we've already seen one | |
6653 | mr r11,r31 ; We'll use this dormant entry if we don't find a free one first | |
6654 | gadNotDorm: bdnz gadScanLoop ; Iterate | |
6655 | ||
6656 | mr r31,r2 ; r31 <- final mapping's address | |
6657 | rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping | |
6658 | bne gadFillMap ; Join common path on hit (r31 points to free mapping) | |
6659 | rlwinm r0,r6,0,mpgDormant ; Dormant entry? | |
6660 | xori r0,r0,mpgDormant ; Invert dormant flag | |
6661 | or. r0,r0,r11 ; Skip all but the first dormant entry we see | |
6662 | bne gadCkDormant ; Not dormant or we've already seen one | |
6663 | mr r11,r31 ; We'll use this dormant entry if we don't find a free one first | |
6664 | ||
6665 | gadCkDormant: | |
6666 | mr. r31,r11 ; Get dormant mapping, if any, and test | |
6667 | bne gadUpCursor ; Go update the cursor, we'll take the dormant entry | |
6668 | ||
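;		Illustrative sketch (not part of the build): the slot-selection
;		policy of the scan above, in C. Starting at the group's round-robin
;		cursor we prefer a free slot, then the first dormant slot seen, and
;		only as a last resort steal the active slot at the cursor. 'group'
;		and 'victim' are illustrative names.
;
;			mapping_t *victim = NULL;
;			for (i = 0, slot = cursor; i < GV_SLOTS; i++, slot = (slot + 1) & GV_SLOT_MASK) {
;			    if (group[slot].mpFlags & mpgFree) return &group[slot]; // best: free
;			    if ((group[slot].mpFlags & mpgDormant) && victim == NULL)
;			        victim = &group[slot];              // next best: first dormant
;			}
;			return victim ? victim : &group[cursor];    // worst: steal at cursor
;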
6669 | gadSteal: | |
6670 | lbz r12,mpgCursor(r22) ; Get group's cursor | |
6671 | rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2) | |
6672 | ; Prepare to address slot at cursor | |
6673 | or r31,r22,r12 ; r31 <- address of mapping to steal | |
6674 | ||
6675 | bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately | |
6676 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6677 | ; r31 <- mapping's physical address | |
6678 | ; r3 -> PTE slot physical address | |
6679 | ; r4 -> High-order 32 bits of PTE | |
6680 | ; r5 -> Low-order 32 bits of PTE | |
6681 | ; r6 -> PCA | |
6682 | ; r7 -> PCA physical address | |
6683 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6684 | b gadFreePTE ; Join 64-bit path to release the PTE | |
6685 | gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
6686 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
6687 | gadFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
6688 | beq- gadUpCursor ; No valid PTE, we're almost done | |
6689 | lis r0,0x8000 ; Prepare free bit for this slot | |
6690 | srw r0,r0,r2 ; Position free bit | |
6691 | or r6,r6,r0 ; Set it in our PCA image | |
6692 | lwz r8,mpPte(r31) ; Get PTE pointer | |
6693 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
6694 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
6695 | eieio ; Synchronize all previous updates (mapInvPtexx didn't) | |
6696 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
6697 | ||
6698 | gadUpCursor: | |
6699 | rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK | |
6700 | ; Recover slot number from stolen mapping's address | |
6701 | addi r12,r12,1 ; Increment slot number | |
6702 | rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range | |
6703 | stb r12,mpgCursor(r22) ; Update group's cursor | |
6704 | ||
6705 | lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number | |
6706 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
6707 | mr. r26,r3 ; Got lock on our physent? | |
6708 | beq-- gadBadPLock ; No, time to bail out | |
6709 | ||
6710 | crset cr1_eq ; cr1_eq <- previous link is the anchor | |
6711 | bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine | |
6712 | la r11,ppLink+4(r26) ; Point to chain anchor | |
6713 | lwz r9,ppLink+4(r26) ; Get chain anchor | |
6714 | rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer | |
6715 | gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good | |
6716 | cmplw r9,r31 ; Is this the mapping to remove? | |
6717 | lwz r8,mpAlias+4(r9) ; Get forward chain pointer | |
6718 | bne gadRemNext ; No, chain onward | |
6719 | bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor | |
6720 | stw r8,0(r11) ; Unchain gva->phys mapping | |
6721 | b gadDelDone ; Finish deleting mapping | |
6722 | gadRemRetry: | |
6723 | lwarx r0,0,r11 ; Get previous link | |
6724 | rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags | |
6725 | stwcx. r0,0,r11 ; Update previous link | |
6726 | bne- gadRemRetry ; Lost reservation, retry | |
6727 | b gadDelDone ; Finish deleting mapping | |
6728 | ||
6729 | gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link | |
6730 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6731 | mr. r9,r8 ; Does next entry exist? | |
6732 | b gadRemLoop ; Carry on | |
6733 | ||
6734 | gadRemove64: | |
6735 | li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
6736 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6737 | la r11,ppLink(r26) ; Point to chain anchor | |
6738 | ld r9,ppLink(r26) ; Get chain anchor | |
6739 | andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer | |
6740 | gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good | |
6741 | cmpld r9,r31 ; Is this the mapping to remove? | |
6742 | ld r8,mpAlias(r9) ; Get forward chain pointer | |
6743 | bne gadRem64Nxt ; Not mapping to remove, chain on, dude | |
6744 | bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor | |
6745 | std r8,0(r11) ; Unchain gva->phys mapping | |
6746 | b gadDelDone ; Finish deleting mapping | |
6747 | gadRem64Rt: ldarx r0,0,r11 ; Get previous link | |
6748 | and r0,r0,r7 ; Get flags | |
6749 | or r0,r0,r8 ; Insert new forward pointer | |
6750 | stdcx. r0,0,r11 ; Slam it back in | |
6751 | bne-- gadRem64Rt ; Lost reservation, retry | |
6752 | b gadDelDone ; Finish deleting mapping | |
6753 | ||
6754 | .align 5 | |
6755 | gadRem64Nxt: | |
6756 | la r11,mpAlias(r9) ; Point to (soon to be) previous link | |
6757 | crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor | |
6758 | mr. r9,r8 ; Does next entry exist? | |
6759 | b gadRem64Lp ; Carry on | |
6760 | ||
6761 | gadDelDone: | |
6762 | mr r3,r26 ; Get physent address | |
6763 | bl mapPhysUnlock ; Unlock physent chain | |
6764 | ||
6765 | gadFillMap: | |
6766 | lwz r12,pmapSpace(r28) ; Get guest space id number | |
6767 | li r2,0 ; Get a zero | |
6768 | stw r24,mpFlags(r31) ; Set mapping's flags | |
6769 | sth r12,mpSpace(r31) ; Set mapping's space id number | |
6770 | stw r2,mpPte(r31) ; Set mapping's pte pointer invalid | |
6771 | stw r29,mpPAddr(r31) ; Set mapping's physical address | |
6772 | bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine | |
6773 | stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags) | |
6774 | b gadChain ; Continue with chaining mapping to physent | |
6775 | gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags) | |
6776 | ||
6777 | gadChain: mr r3,r29 ; r3 <- physical frame address | |
6778 | bl mapFindLockPN ; Find 'n' lock this page's physent | |
6779 | mr. r26,r3 ; Got lock on our physent? | |
6780 | beq-- gadBadPLock ; No, time to bail out | |
6781 | ||
6782 | bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine | |
6783 | lwz r12,ppLink+4(r26) ; Get forward chain | |
6784 | rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags | |
6785 | rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags | |
6786 | stw r11,mpAlias+4(r31) ; New mapping will head chain | |
6787 | stw r12,ppLink+4(r26) ; Point physent to new mapping | |
6788 | b gadFinish ; All over now... | |
6789 | ||
6790 | gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer | |
6791 | rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F | |
6792 | ld r12,ppLink(r26) ; Get forward chain | |
6793 | andc r11,r12,r7 ; Get physent's forward chain pointer sans flags | |
6794 | and r12,r12,r7 ; Isolate pointer's flags | |
6795 | or r12,r12,r31 ; Insert new mapping's address forming pointer | |
6796 | std r11,mpAlias(r31) ; New mapping will head chain | |
6797 | std r12,ppLink(r26) ; Point physent to new mapping | |
6798 | ||
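;		Illustrative sketch (not part of the build): chaining the new mapping
;		at the head of the physent's alias list, as done above for both
;		widths. The anchor's flag bits ride in the pointer word and are
;		preserved; flagMask stands for ppFlags (32-bit) or the rotated
;		ppLFAmask (64-bit).
;
;			newmp->mpAlias  = physent->ppLink & ~flagMask;  // old head follows us
;			physent->ppLink = (physent->ppLink & flagMask)
;			                | (uintptr_t)newmp;             // new mapping heads chain
;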
6799 | gadFinish: eieio ; Ensure new mapping is completely visible | |
6800 | ||
6801 | gadRelPhy: mr r3,r26 ; r3 <- physent addr | |
6802 | bl mapPhysUnlock ; Unlock physent chain | |
6803 | ||
6804 | gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr | |
6805 | bl sxlkUnlock ; Release host pmap search lock | |
6806 | ||
6807 | bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately | |
6808 | mtmsr r25 ; Restore 'rupts, translation | |
6809 | isync ; Throw a small wrench into the pipeline | |
6810 | b gadPopFrame ; Nothing to do now but pop a frame and return | |
6811 | gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode | |
6812 | gadPopFrame: | |
6813 | lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6814 | ; Get caller's return address | |
6815 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
6816 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
6817 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
6818 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
6819 | mtlr r0 ; Prepare return address | |
6820 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
6821 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
6822 | lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25 | |
6823 | lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24 | |
6824 | lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23 | |
6825 | lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22 | |
6826 | lwz r1,0(r1) ; Pop stack frame | |
6827 | blr ; Return to caller | |
6828 | ||
6829 | gadPEMissMiss: | |
6830 | gadBadPLock: | |
6831 | lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the | |
6832 | ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb? | |
6833 | li r3,failMapping ; The BOMB, Dmitri. | |
6834 | sc ; The hydrogen bomb. | |
6835 | ||
6836 | ||
6837 | ; | |
6838 | ; Guest shadow assist -- suspend a guest mapping | |
6839 | ; | |
6840 | ; Suspends a guest mapping. | |
6841 | ; | |
6842 | ; Parameters: | |
6843 | ; r3 : address of host pmap, 32-bit kernel virtual address | |
6844 | ; r4 : address of guest pmap, 32-bit kernel virtual address | |
6845 | ; r5 : guest virtual address, high-order 32 bits | |
6846 | ; r6 : guest virtual address, low-order 32 bits | |
6847 | ; | |
6848 | ; Non-volatile register usage: | |
6849 | ; r26 : VMM extension block's physical address | |
6850 | ; r27 : host pmap physical address | |
6851 | ; r28 : guest pmap physical address | |
6852 | ; r29 : caller's msr image from mapSetUp | |
6853 | ; r30 : guest virtual address | |
6854 | ; r31 : gva->phys mapping's physical address | |
6855 | ; | |
6856 | ||
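;		Illustrative sketch (not part of the build): a C-level view of this
;		routine's interface, reconstructed from the register conventions
;		above; the prototype is an assumption.
;
;			extern void hw_susp_map_gv(pmap_t host_pmap, pmap_t guest_pmap,
;			                           addr64_t guest_va);
;			// Finds the active gva->phys mapping, invalidates any PTE backing
;			// it, and marks it dormant; a miss is silently ignored.
;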
6857 | .align 5 | |
6858 | .globl EXT(hw_susp_map_gv) | |
6859 | ||
6860 | LEXT(hw_susp_map_gv) | |
6861 | ||
6862 | #define gsuStackSize ((31-26+1)*4)+4 | |
6863 | ||
6864 | stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1) | |
6865 | ; Mint a new stack frame | |
6866 | mflr r0 ; Get caller's return address | |
6867 | mfsprg r11,2 ; Get feature flags | |
6868 | mtcrf 0x02,r11 ; Insert feature flags into cr6 | |
6869 | stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
6870 | ; Save caller's return address | |
6871 | stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31 | |
6872 | stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30 | |
6873 | stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29 | |
6874 | stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28 | |
6875 | stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27 | |
6876 | stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26 | |
6877 | ||
6878 | rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr | |
6879 | ||
6880 | lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr | |
6881 | lwz r9,pmapSpace(r4) ; r9 <- guest space ID number | |
6882 | bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine | |
6883 | ||
6884 | lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr | |
6885 | lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt | |
6886 | lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt | |
6887 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6888 | srwi r11,r30,12 ; Form shadow hash: | |
6889 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6890 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6891 | ; Form index offset from hash page number | |
6892 | add r31,r31,r10 ; r31 <- hash page index entry | |
6893 | lwz r31,4(r31) ; r31 <- hash page paddr | |
6894 | rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK | |
6895 | ; r31 <- hash group paddr | |
6896 | b gsuStart ; Get to it | |
6897 | gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr | |
6898 | ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr | |
6899 | ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt | |
6900 | ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt | |
6901 | la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index | |
6902 | srwi r11,r30,12 ; Form shadow hash: | |
6903 | xor r11,r11,r9 ; spaceID ^ (vaddr >> 12) | |
6904 | rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK | |
6905 | ; Form index offset from hash page number | |
6906 | add r31,r31,r10 ; r31 <- hash page index entry | |
6907 | ld r31,0(r31) ; r31 <- hash page paddr | |
6908 | insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2) | |
6909 | ; r31 <- hash group paddr | |
6910 | ||
6911 | gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real | |
6912 | xor r28,r4,r28 ; Convert guest pmap_t virt->real | |
6913 | bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode | |
6914 | mr r29,r11 ; Save caller's msr image | |
6915 | ||
6916 | la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address | |
6917 | bl sxlkExclusive ; Get lock exclusive | |
6918 | ||
6919 | li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots | |
6920 | mtctr r0 ; in this group | |
6921 | bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine | |
6922 | ||
6923 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6924 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6925 | lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address | |
6926 | b gsu32SrchLp ; Let the search begin! | |
6927 | ||
6928 | .align 5 | |
6929 | gsu32SrchLp: | |
6930 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6931 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6932 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6933 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6934 | clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6935 | lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr | |
6936 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6937 | xor r7,r7,r9 ; Compare space ID | |
6938 | or r0,r11,r7 ; r0 <- !(!free && !dormant && space match) | |
6939 | xor r8,r8,r30 ; Compare virtual address | |
6940 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6941 | beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6942 | ||
6943 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6944 | bdnz gsu32SrchLp ; Iterate | |
6945 | ||
6946 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6947 | clrrwi r5,r5,12 ; Remove flags from virtual address | |
6948 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6949 | xor r4,r4,r9 ; Compare space ID | |
6950 | or r0,r11,r4 ; r0 <- !(!free && !dormant && space match) | |
6951 | xor r5,r5,r30 ; Compare virtual address | |
6952 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6953 | beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6954 | b gsuSrchMiss ; No joy in our hash group | |
6955 | ||
6956 | gsu64Search: | |
6957 | lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags | |
6958 | lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID | |
6959 | ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address | |
6960 | b gsu64SrchLp ; Let the search begin! | |
6961 | ||
6962 | .align 5 | |
6963 | gsu64SrchLp: | |
6964 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6965 | lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags | |
6966 | mr r7,r4 ; r7 <- current mapping slot's space ID | |
6967 | lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID | |
6968 | clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags | |
6969 | ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr | |
6970 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6971 | xor r7,r7,r9 ; Compare space ID | |
6972 | or r0,r11,r7 ; r0 <- !(!free && !dormant && space match) | |
6973 | xor r8,r8,r30 ; Compare virtual address | |
6974 | or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6975 | beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping) | |
6976 | ||
6977 | addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot | |
6978 | bdnz gsu64SrchLp ; Iterate | |
6979 | ||
6980 | mr r6,r3 ; r6 <- current mapping slot's flags | |
6981 | clrrdi r5,r5,12 ; Remove flags from virtual address | |
6982 | andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags | |
6983 | xor r4,r4,r9 ; Compare space ID | |
6984 | or r0,r11,r4 ; r0 <- !(!free && !dormant && space match) | |
6985 | xor r5,r5,r30 ; Compare virtual address | |
6986 | or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match | |
6987 | bne gsuSrchMiss ; No joy in our hash group | |
6988 | ||
6989 | gsuSrchHit: | |
6990 | bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately | |
6991 | bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change | |
6992 | ; r31 <- mapping's physical address | |
6993 | ; r3 -> PTE slot physical address | |
6994 | ; r4 -> High-order 32 bits of PTE | |
6995 | ; r5 -> Low-order 32 bits of PTE | |
6996 | ; r6 -> PCA | |
6997 | ; r7 -> PCA physical address | |
6998 | rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs) | |
6999 | b gsuFreePTE ; Join 64-bit path to release the PTE | |
7000 | gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change | |
7001 | rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs) | |
7002 | gsuFreePTE: mr. r3,r3 ; Was there a valid PTE? | |
7003 | beq- gsuNoPTE ; No valid PTE, we're almost done | |
7004 | lis r0,0x8000 ; Prepare free bit for this slot | |
7005 | srw r0,r0,r2 ; Position free bit | |
7006 | or r6,r6,r0 ; Set it in our PCA image | |
7007 | lwz r8,mpPte(r31) ; Get PTE pointer | |
7008 | rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid | |
7009 | stw r8,mpPte(r31) ; Save invalidated PTE pointer | |
7010 | eieio ; Synchronize all previous updates (mapInvPtexx didn't) | |
7011 | stw r6,0(r7) ; Update PCA and unlock the PTEG | |
7012 | ||
7013 | gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags | |
7014 | ori r3,r3,mpgDormant ; Mark entry dormant | |
7015 | stw r3,mpFlags(r31) ; Save updated flags | |
7016 | eieio ; Ensure update is visible when we unlock | |
7017 | ||
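;		Illustrative sketch (not part of the build): suspension reduces to
;		the two stores above. The mapping stays chained on its physent, so
;		hw_res_map_gv can later revalidate and resume it cheaply; eieio()
;		stands in for the barrier instruction.
;
;			mp->mpFlags |= mpgDormant;  // active-only searches now skip this entry
;			eieio();                    // make the update visible before unlocking
;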
7018 | gsuSrchMiss: | |
7019 | la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr | |
7020 | bl sxlkUnlock ; Release host pmap search lock | |
7021 | ||
7022 | bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately | |
7023 | mtmsr r29 ; Restore 'rupts, translation | |
7024 | isync ; Throw a small wrench into the pipeline | |
7025 | b gsuPopFrame ; Nothing to do now but pop a frame and return | |
7026 | gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode | |
7027 | gsuPopFrame: | |
7028 | lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1) | |
7029 | ; Get caller's return address | |
7030 | lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31 | |
7031 | lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30 | |
7032 | lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29 | |
7033 | lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28 | |
7034 | mtlr r0 ; Prepare return address | |
7035 | lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27 | |
7036 | lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26 | |
7037 | lwz r1,0(r1) ; Pop stack frame | |
7038 | blr ; Return to caller | |
7039 | ||
7040 | ; | |
7041 | ; Guest shadow assist -- test guest mapping reference and change bits | |
7042 | ; | |
7043 | ; Locates the specified guest mapping, and if it exists gathers its reference | |
7044 |