1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22#include <assym.s>
23#include <debug.h>
24#include <db_machine_commands.h>
25#include <mach_rt.h>
26
27#include <mach_debug.h>
28#include <ppc/asm.h>
29#include <ppc/proc_reg.h>
30#include <ppc/exception.h>
31#include <ppc/Performance.h>
32#include <ppc/exception.h>
33 #include <mach/ppc/vm_param.h>
34
35 .text
36
37;
38; 0 0 1 2 3 4 4 5 6
39; 0 8 6 4 2 0 8 6 3
40; +--------+--------+--------+--------+--------+--------+--------+--------+
41; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
42; +--------+--------+--------+--------+--------+--------+--------+--------+
43;
44; 0 0 1
45; 0 8 6
46; +--------+--------+--------+
47; |//////BB|BBBBBBBB|BBBB////| - SID - base
48; +--------+--------+--------+
49;
50; 0 0 1
51; 0 8 6
52; +--------+--------+--------+
53; |////////|11111111|111111//| - SID - copy 1
54; +--------+--------+--------+
55;
56; 0 0 1
57; 0 8 6
58; +--------+--------+--------+
59; |////////|//222222|22222222| - SID - copy 2
60; +--------+--------+--------+
61;
62; 0 0 1
63; 0 8 6
64; +--------+--------+--------+
65; |//////33|33333333|33//////| - SID - copy 3 - not needed
66; +--------+--------+--------+ for 65 bit VPN
67;
68; 0 0 1 2 3 4 4 5 5
69; 0 8 6 4 2 0 8 1 5
70; +--------+--------+--------+--------+--------+--------+--------+
71; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
72; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed
73; 0 0 1 2 3 4 4 5 5
74; 0 8 6 4 2 0 8 1 5
75; +--------+--------+--------+--------+--------+--------+--------+
76; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
77; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment"
78; part of EA to make
79; room for SID base
80;
81;
82; 0 0 1 2 3 4 4 5 5
83; 0 8 6 4 2 0 8 1 5
84; +--------+--------+--------+--------+--------+--------+--------+
85; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
86; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA
87;
88; 0 0 1 2 3 4 4 5 6 7 7
89; 0 8 6 4 2 0 8 6 4 2 9
90; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
91; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
92; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
93;
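;
; Added note (not in the original source): in rough C terms, the VSID
; construction pictured above is an OR of shifted copies of the space ID
; followed by an XOR with the shifted segment part of the EA. The names and
; shift amounts below are placeholders; the bit diagrams above are the
; authoritative layout.
;
;     uint64_t sidHash = sidBase | sidCopy1 | sidCopy2;    /* shifted SID copies ORed */
;     uint64_t vsid    = sidHash ^ shiftedSegmentEA;       /* XOR with shifted EA     */
;     uint64_t vpn     = (vsid << pageShift) | pageIndex;  /* VSID plus page index    */
;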
94
95
96 /* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
97 *
98 * Maps a page or block into a pmap
99 *
100 * Returns 0 if add worked or the vaddr of the first overlap if not
101 *
102 * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
103 *
104 * 1) bump mapping busy count
105 * 2) lock pmap share
106 * 3) find mapping full path - finds all possible list previous elements
107 * 4) upgrade pmap to exclusive
108 * 5) add mapping to search list
109 * 6) find physent
110 * 7) lock physent
111 * 8) add to physent
112 * 9) unlock physent
113 * 10) unlock pmap
114 * 11) drop mapping busy count
115 *
116 *
117 * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
118 *
119 * 1) bump mapping busy count
120 * 2) lock pmap share
121 * 3) find mapping full path - finds all possible list previous elements
122 * 4) upgrade pmap to exclusive
123 * 5) add mapping to search list
124 * 6) unlock pmap
125 * 7) drop mapping busy count
126 *
127 */
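;
; Added sketch (not in the original source): a C-level outline of the
; non-block add path listed above. All names below are placeholders for the
; real kernel interfaces, not the actual API.
;
;     addr64_t hw_add_map_sketch(pmap_t *pmap, mapping_t *mp) {
;         mapping_busy_bump(mp);                      /* 1) bump busy count           */
;         sxlk_lock_shared(&pmap->searchLock);        /* 2) lock pmap share           */
;         addr64_t clash = map_search_full(pmap, mp); /* 3) full search, keep prevs   */
;         if (clash != 0) return clash;               /*    overlap: caller resolves  */
;         sxlk_promote(&pmap->searchLock);            /* 4) upgrade to exclusive      */
;         map_insert(pmap, mp);                       /* 5) add to search list        */
;         physent_lock(mp->paddr);                    /* 6-7) find and lock physent   */
;         physent_chain_add(mp->paddr, mp);           /* 8) add to physent chain      */
;         physent_unlock(mp->paddr);                  /* 9) unlock physent            */
;         sxlk_unlock(&pmap->searchLock);             /* 10) unlock pmap              */
;         mapping_busy_drop(mp);                      /* 11) drop busy count          */
;         return 0;                                   /* 0 means the add worked       */
;     }
;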
128
129 .align 5
130 .globl EXT(hw_add_map)
131
132LEXT(hw_add_map)
133
134 stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
135 mflr r0 ; Save the link register
136 stw r17,FM_ARG0+0x00(r1) ; Save a register
137 stw r18,FM_ARG0+0x04(r1) ; Save a register
138 stw r19,FM_ARG0+0x08(r1) ; Save a register
139 mfsprg r19,2 ; Get feature flags
140 stw r20,FM_ARG0+0x0C(r1) ; Save a register
141 stw r21,FM_ARG0+0x10(r1) ; Save a register
142 mtcrf 0x02,r19 ; move pf64Bit cr6
143 stw r22,FM_ARG0+0x14(r1) ; Save a register
144 stw r23,FM_ARG0+0x18(r1) ; Save a register
145 stw r24,FM_ARG0+0x1C(r1) ; Save a register
146 stw r25,FM_ARG0+0x20(r1) ; Save a register
147 stw r26,FM_ARG0+0x24(r1) ; Save a register
148 stw r27,FM_ARG0+0x28(r1) ; Save a register
149 stw r28,FM_ARG0+0x2C(r1) ; Save a register
150 stw r29,FM_ARG0+0x30(r1) ; Save a register
151 stw r30,FM_ARG0+0x34(r1) ; Save a register
152 stw r31,FM_ARG0+0x38(r1) ; Save a register
153 stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
154
155#if DEBUG
156 lwz r11,pmapFlags(r3) ; Get pmaps flags
157 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
158 bne hamPanic ; Call not valid for guest shadow assist pmap
159#endif
160
161 rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
162 mr r28,r3 ; Save the pmap
163 mr r31,r4 ; Save the mapping
164 bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
165 lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
166 lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping
167
168 b hamSF1x ; Done...
169
170hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
171 ld r21,mbvrswap(r11) ; Get conversion mask for mapping
172
173hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
174
175 mr r17,r11 ; Save the MSR
176 xor r28,r28,r20 ; Convert the pmap to physical addressing
177 xor r31,r31,r21 ; Convert the mapping to physical addressing
178
179 la r3,pmapSXlk(r28) ; Point to the pmap search lock
180 bl sxlkShared ; Go get a shared lock on the mapping lists
181 mr. r3,r3 ; Did we get the lock?
182 lwz r24,mpFlags(r31) ; Pick up the flags
183 bne-- hamBadLock ; Nope...
184
185 li r21,0 ; Remember that we have the shared lock
186
187;
188; Note that we do a full search (i.e., no shortcut level skips, etc.)
189; here so that we will know the previous elements so we can dequeue them
190; later.
191;
192
193hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
194 lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
195 mr r3,r28 ; Pass in pmap to search
196 lhz r23,mpBSize(r31) ; Get the block size for later
197 mr r29,r4 ; Save top half of vaddr for later
198 mr r30,r5 ; Save bottom half of vaddr for later
199
200 bl EXT(mapSearchFull) ; Go see if we can find it
201
202 li r22,lo16(0x800C) ; Get 0xFFFF800C
203 rlwinm r0,r24,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
204 addi r23,r23,1 ; Get actual length
205 rlwnm r22,r22,r0,27,31 ; Rotate to get 12 or 25
206 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
207 slw r9,r23,r22 ; Isolate the low part
208 rlwnm r22,r23,r22,22,31 ; Extract the high order
209 addic r23,r9,-4096 ; Get the length to the last page
210 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
211 addme r22,r22 ; Do high order as well...
212 mr. r3,r3 ; Did we find a mapping here?
213 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
214 bne-- hamOverlay ; We found a mapping, this is no good, can not double map...
215
216 addc r9,r0,r23 ; Add size to get last page in new range
217 or. r0,r4,r5 ; Are we beyond the end?
218 adde r8,r29,r22 ; Add the rest of the length on
219 rlwinm r9,r9,0,0,31 ; Clean top half of sum
220 beq++ hamFits ; We are at the end...
221
222 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
223 cmplw r8,r4 ; Is our end before the next (top part)
224 crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
225 cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less
226
227 bf-- cr0_eq,hamOverlay ; No, we do fit, there is an overlay...
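;
; Added note (not in the original source): the arithmetic above decodes the
; mapping size from mpBSize and the block-size unit, then tests whether the
; new range reaches the next existing mapping. Roughly, with placeholder
; names:
;
;     uint64_t bytes = (mpBSize + 1) << (is32MBbsu ? 25 : 12);   /* 4K or 32MB units      */
;     uint64_t last  = newVaddr + bytes - 4096;                  /* last page in range    */
;     int overlap    = (nextVaddr != 0) && (last >= nextVaddr);  /* take hamOverlay path  */
;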
228
229;
230; Here we try to convert to an exclusive lock. This will fail if someone else
231; has it shared.
232;
233hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
234 la r3,pmapSXlk(r28) ; Point to the pmap search lock
235
236 bne-- hamGotX ; We already have the exclusive...
237
238 bl sxlkPromote ; Try to promote shared to exclusive
239 mr. r3,r3 ; Could we?
240 beq++ hamGotX ; Yeah...
241
242;
243; Since we could not promote our lock, we need to convert to it.
244; That means that we drop the shared lock and wait to get it
245; exclusive. Since we release the lock, we need to do the look up
246; again.
247;
248
249 la r3,pmapSXlk(r28) ; Point to the pmap search lock
250 bl sxlkConvert ; Convert shared to exclusive
251 mr. r3,r3 ; Could we?
252 bne-- hamBadLock ; Nope, we must have timed out...
253
254 li r21,1 ; Remember that we have the exclusive lock
255 b hamRescan ; Go look again...
256
257 .align 5
258
259 hamGotX: mr r3,r28 ; Get the pmap to insert into
260 mr r4,r31 ; Point to the mapping
261 bl EXT(mapInsert) ; Insert the mapping into the list
262
263 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
264 lhz r8,mpSpace(r31) ; Get the address space
265 lwz r11,lgpPcfg(r11) ; Get the page config
266 mfsdr1 r7 ; Get the hash table base/bounds
267 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
268
269 andi. r0,r24,mpType ; Is this a normal mapping?
270
271 rlwimi r8,r8,14,4,17 ; Double address space
272 rlwinm r9,r30,0,4,31 ; Clear segment
273 rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
274 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
275 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
276 rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
277 addi r4,r4,1 ; Bump up the mapped page count
278 srw r9,r9,r11 ; Isolate just the page index
279 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
280 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
281 xor r9,r9,r10 ; Get the hash to the PTEG
282
283 bne-- hamDoneNP ; Not a normal mapping, therefore, no physent...
284
285 bl mapPhysFindLock ; Go find and lock the physent
286
287 bt++ pf64Bitb,ham64 ; This is 64-bit...
288
289 lwz r11,ppLink+4(r3) ; Get the alias chain pointer
290 rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
291 slwi r9,r9,6 ; Make PTEG offset
292 ori r7,r7,0xFFC0 ; Stick in the bottom part
293 rlwinm r12,r11,0,~ppFlags ; Clean it up
294 and r9,r9,r7 ; Wrap offset into table
295 mr r4,r31 ; Set the link to install
296 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
297 stw r12,mpAlias+4(r31) ; Move to the mapping
298 bl mapPhyCSet32 ; Install the link
299 b hamDone ; Go finish up...
300
301 .align 5
302
303 ham64: li r0,ppLFAmask ; Get mask to clean up alias pointer
304 subfic r7,r7,46 ; Get number of leading zeros
305 eqv r4,r4,r4 ; Get all ones
306 ld r11,ppLink(r3) ; Get the alias chain pointer
307 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
308 srd r4,r4,r7 ; Get the wrap mask
309 sldi r9,r9,7 ; Change hash to PTEG offset
310 andc r11,r11,r0 ; Clean out the lock and flags
311 and r9,r9,r4 ; Wrap to PTEG
312 mr r4,r31
313 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
314 std r11,mpAlias(r31) ; Set the alias pointer in the mapping
315
316 bl mapPhyCSet64 ; Install the link
317
318hamDone: bl mapPhysUnlock ; Unlock the physent chain
319
320hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
321 bl sxlkUnlock ; Unlock the search list
322
323 mr r3,r31 ; Get the mapping pointer
324 bl mapDropBusy ; Drop the busy count
325
326 li r3,0 ; Set successful return
327 li r4,0 ; Set successful return
328
329 hamReturn: bt++ pf64Bitb,hamR64 ; Yes...
330
331 mtmsr r17 ; Restore enables/translation/etc.
332 isync
333 b hamReturnC ; Join common...
334
335hamR64: mtmsrd r17 ; Restore enables/translation/etc.
336 isync
337
338 hamReturnC: lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
339 lwz r17,FM_ARG0+0x00(r1) ; Restore a register
340 lwz r18,FM_ARG0+0x04(r1) ; Restore a register
341 lwz r19,FM_ARG0+0x08(r1) ; Restore a register
342 lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
343 mtlr r0 ; Restore the return
344 lwz r21,FM_ARG0+0x10(r1) ; Restore a register
345 lwz r22,FM_ARG0+0x14(r1) ; Restore a register
346 lwz r23,FM_ARG0+0x18(r1) ; Restore a register
347 lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
348 lwz r25,FM_ARG0+0x20(r1) ; Restore a register
349 lwz r26,FM_ARG0+0x24(r1) ; Restore a register
350 lwz r27,FM_ARG0+0x28(r1) ; Restore a register
351 lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
352 lwz r29,FM_ARG0+0x30(r1) ; Restore a register
353 lwz r30,FM_ARG0+0x34(r1) ; Restore a register
354 lwz r31,FM_ARG0+0x38(r1) ; Restore a register
355 lwz r1,0(r1) ; Pop the stack
356
357 blr ; Leave...
358
359
360 .align 5
361
362hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
363 li r0,mpC|mpR ; Get a mask to turn off RC bits
364 lwz r23,mpFlags(r31) ; Get the requested flags
365 lwz r20,mpVAddr(r3) ; Get the overlay address
366 lwz r8,mpVAddr(r31) ; Get the requested address
367 lwz r21,mpVAddr+4(r3) ; Get the overlay address
368 lwz r9,mpVAddr+4(r31) ; Get the requested address
369 lhz r10,mpBSize(r3) ; Get the overlay length
370 lhz r11,mpBSize(r31) ; Get the requested length
371 lwz r24,mpPAddr(r3) ; Get the overlay physical address
372 lwz r25,mpPAddr(r31) ; Get the requested physical address
373 andc r21,r21,r0 ; Clear RC bits
374 andc r9,r9,r0 ; Clear RC bits
375
376 la r3,pmapSXlk(r28) ; Point to the pmap search lock
377 bl sxlkUnlock ; Unlock the search list
378
379 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
380 mr r3,r20 ; Save the top of the colliding address
381 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
382
383 bne++ hamRemv ; Removing, go say so so we help...
384
385 cmplw r20,r8 ; High part of vaddr the same?
386 cmplw cr1,r21,r9 ; Low part?
387 crand cr5_eq,cr0_eq,cr1_eq ; Remember if same
388
389 cmplw r10,r11 ; Size the same?
390 cmplw cr1,r24,r25 ; Physical address?
391 crand cr5_eq,cr5_eq,cr0_eq ; Remember
392 crand cr5_eq,cr5_eq,cr1_eq ; Remember if same
393
394 xor r23,r23,r22 ; Compare mapping flag words
395 andi. r23,r23,mpType|mpPerm ; Are mapping types and attributes the same?
396 crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
397 bf-- cr5_eq,hamSmash ; This is not the same, so we return a smash...
398
399 ori r4,r4,mapRtMapDup ; Set duplicate
400 b hamReturn ; And leave...
401
402hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
403 b hamReturn ; Come back yall...
404
405hamSmash: ori r4,r4,mapRtSmash ; Tell caller that it has some clean up to do
406 b hamReturn ; Join common epilog code
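;
; Added note (not in the original source): the collision cases above reduce
; to roughly the following, with placeholder names; the low bits of the
; returned colliding vaddr carry the return code:
;
;     if (overlay->flags & mpRIP)             return vaddr | mapRtRemove; /* being removed   */
;     else if (identical(overlay, requested)) return vaddr | mapRtMapDup; /* exact duplicate */
;     else                                    return vaddr | mapRtSmash;  /* caller cleans up */
;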
407
408 .align 5
409
410hamBadLock: li r3,0 ; Set lock time out error code
411 li r4,mapRtBadLk ; Set lock time out error code
412 b hamReturn ; Leave....
413
414hamPanic: lis r0,hi16(Choke) ; System abend
415 ori r0,r0,lo16(Choke) ; System abend
416 li r3,failMapping ; Show that we failed some kind of mapping thing
417 sc
418
419
420
421
422/*
423 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
424 *
425 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
426 * a 64-bit quantity, it is a long long so it is in R4 and R5.
427 *
428 * We return the virtual address of the removed mapping in
429 * R3.
430 *
431 * Note that this is designed to be called from 32-bit mode with a stack.
432 *
433 * We disable translation and all interruptions here. This keeps us
434 * from having to worry about a deadlock due to having anything locked
435 * and needing it to process a fault.
436 *
437 * Note that this must be done with both interruptions off and VM off
438 *
439 * Remove mapping via pmap, regular page, no pte
440 *
441 * 1) lock pmap share
442 * 2) find mapping full path - finds all possible list previous elements
443 * 3) upgrade pmap to exclusive
444 * 4) bump mapping busy count
445 * 5) remove mapping from search list
446 * 6) unlock pmap
447 * 7) lock physent
448 * 8) remove from physent
449 * 9) unlock physent
450 * 10) drop mapping busy count
451 * 11) drain mapping busy count
452 *
453 *
454 * Remove mapping via pmap, regular page, with pte
455 *
456 * 1) lock pmap share
457 * 2) find mapping full path - finds all possible list previous elements
458 * 3) upgrade lock to exclusive
459 * 4) bump mapping busy count
460 * 5) lock PTEG
461 * 6) invalidate pte and tlbie
462 * 7) atomic merge rc into physent
463 * 8) unlock PTEG
464 * 9) remove mapping from search list
465 * 10) unlock pmap
466 * 11) lock physent
467 * 12) remove from physent
468 * 13) unlock physent
469 * 14) drop mapping busy count
470 * 15) drain mapping busy count
471 *
472 *
473 * Remove mapping via pmap, I/O or block
474 *
475 * 1) lock pmap share
476 * 2) find mapping full path - finds all possible list previous elements
477 * 3) upgrade lock to exclusive
478 * 4) bump mapping busy count
479 * 5) mark remove-in-progress
480 * 6) check and bump remove chunk cursor if needed
481 * 7) unlock pmap
482 * 8) if something to invalidate, go to step 11
483
484 * 9) drop busy
485 * 10) return with mapRtRemove to force higher level to call again
486
487 * 11) Lock PTEG
488 * 12) invalidate ptes, no tlbie
489 * 13) unlock PTEG
490 * 14) repeat 11 - 13 for all pages in chunk
491 * 15) if not final chunk, go to step 9
492 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
493 * 17) lock pmap share
494 * 18) find mapping full path - finds all possible list previous elements
495 * 19) upgrade lock to exclusive
496 * 20) remove mapping from search list
497 * 21) drop mapping busy count
498 * 22) drain mapping busy count
499 *
500 */
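;
; Added sketch (not in the original source): a C-level outline of the
; chunked block/IO removal protocol listed above. Names are placeholders;
; the key point is that the caller keeps calling back while mapRtRemove is
; returned.
;
;     addr64_t hw_rem_map_block_sketch(pmap_t *pmap, mapping_t *mp) {
;         mark_remove_in_progress(mp);                 /* 5) set mpRIP                   */
;         uint32_t cursor = mp->blkRemCur;             /* 6) current chunk cursor        */
;         mp->blkRemCur = cursor + mapRemChunk;        /*    claim the next chunk        */
;         sxlk_unlock(&pmap->searchLock);              /* 7) unlock pmap                 */
;         invalidate_chunk_ptes(mp, cursor);           /* 11-14) PTEs only, no tlbie     */
;         if (!last_chunk(mp, cursor))
;             return mapRtRemove;                      /* 9-10) ask caller to call again */
;         tlb_invalidate_block(mp);                    /* 16) bounded tlbie sweep        */
;         remove_from_search_list(pmap, mp);           /* 17-20) relock, find, dequeue   */
;         mapping_busy_drop_and_drain(mp);             /* 21-22) drop and drain busy     */
;         return 0;
;     }
;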
501
502 .align 5
503 .globl EXT(hw_rem_map)
504
505LEXT(hw_rem_map)
506
507;
508; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
509; THE HW_PURGE_* ROUTINES ALSO
510;
511
512#define hrmStackSize ((31-15+1)*4)+4
513 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
514 mflr r0 ; Save the link register
515 stw r15,FM_ARG0+0x00(r1) ; Save a register
516 stw r16,FM_ARG0+0x04(r1) ; Save a register
517 stw r17,FM_ARG0+0x08(r1) ; Save a register
518 stw r18,FM_ARG0+0x0C(r1) ; Save a register
519 stw r19,FM_ARG0+0x10(r1) ; Save a register
520 mfsprg r19,2 ; Get feature flags
521 stw r20,FM_ARG0+0x14(r1) ; Save a register
522 stw r21,FM_ARG0+0x18(r1) ; Save a register
523 mtcrf 0x02,r19 ; move pf64Bit cr6
524 stw r22,FM_ARG0+0x1C(r1) ; Save a register
525 stw r23,FM_ARG0+0x20(r1) ; Save a register
526 stw r24,FM_ARG0+0x24(r1) ; Save a register
527 stw r25,FM_ARG0+0x28(r1) ; Save a register
528 stw r26,FM_ARG0+0x2C(r1) ; Save a register
529 stw r27,FM_ARG0+0x30(r1) ; Save a register
530 stw r28,FM_ARG0+0x34(r1) ; Save a register
531 stw r29,FM_ARG0+0x38(r1) ; Save a register
532 stw r30,FM_ARG0+0x3C(r1) ; Save a register
533 stw r31,FM_ARG0+0x40(r1) ; Save a register
534 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
535 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
536
537#if DEBUG
538 lwz r11,pmapFlags(r3) ; Get pmaps flags
539 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
540 bne hrmPanic ; Call not valid for guest shadow assist pmap
541#endif
542
543 bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
544 lwz r9,pmapvr+4(r3) ; Get conversion mask
545 b hrmSF1x ; Done...
546
547hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask
548
549hrmSF1x:
550 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
551
552 xor r28,r3,r9 ; Convert the pmap to physical addressing
553
554;
555; Here is where we join in from the hw_purge_* routines
556;
557
558hrmJoin: lwz r3,pmapFlags(r28) ; Get pmap's flags
559 mfsprg r19,2 ; Get feature flags again (for alternate entries)
560
561 mr r17,r11 ; Save the MSR
562 mr r29,r4 ; Top half of vaddr
563 mr r30,r5 ; Bottom half of vaddr
564
565 rlwinm. r3,r3,0,pmapVMgsaa ; Is guest shadow assist active?
566 bne-- hrmGuest ; Yes, handle specially
567
568 la r3,pmapSXlk(r28) ; Point to the pmap search lock
569 bl sxlkShared ; Go get a shared lock on the mapping lists
570 mr. r3,r3 ; Did we get the lock?
571 bne-- hrmBadLock ; Nope...
572
573;
574; Note that we do a full search (i.e., no shortcut level skips, etc.)
575; here so that we will know the previous elements so we can dequeue them
576; later. Note: we get back mpFlags in R7.
577;
578
579 mr r3,r28 ; Pass in pmap to search
580 mr r4,r29 ; High order of address
581 mr r5,r30 ; Low order of address
582 bl EXT(mapSearchFull) ; Go see if we can find it
583
584 andi. r0,r7,mpPerm ; Mapping marked permanent?
585 crmove cr5_eq,cr0_eq ; Remember permanent marking
586 mr r20,r7 ; Remember mpFlags
587 mr. r31,r3 ; Did we? (And remember mapping address for later)
588 mr r15,r4 ; Save top of next vaddr
589 mr r16,r5 ; Save bottom of next vaddr
590 beq-- hrmNotFound ; Nope, not found...
591
592 bf-- cr5_eq,hrmPerm ; This one can't be removed...
593;
594; Here we try to promote to an exclusive lock. This will fail if someone else
595; has it shared.
596;
597
598 la r3,pmapSXlk(r28) ; Point to the pmap search lock
599 bl sxlkPromote ; Try to promote shared to exclusive
600 mr. r3,r3 ; Could we?
601 beq++ hrmGotX ; Yeah...
602
603;
604; Since we could not promote our lock, we need to convert to it.
605; That means that we drop the shared lock and wait to get it
606; exclusive. Since we release the lock, we need to do the look up
607; again.
608;
609
610 la r3,pmapSXlk(r28) ; Point to the pmap search lock
611 bl sxlkConvert ; Convert shared to exclusive
612 mr. r3,r3 ; Could we?
613 bne-- hrmBadLock ; Nope, we must have timed out...
614
615 mr r3,r28 ; Pass in pmap to search
616 mr r4,r29 ; High order of address
617 mr r5,r30 ; Low order of address
618 bl EXT(mapSearchFull) ; Rescan the list
619
620 andi. r0,r7,mpPerm ; Mapping marked permanent?
621 crmove cr5_eq,cr0_eq ; Remember permanent marking
622 mr. r31,r3 ; Did we lose it when we converted?
623 mr r20,r7 ; Remember mpFlags
624 mr r15,r4 ; Save top of next vaddr
625 mr r16,r5 ; Save bottom of next vaddr
626 beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...
627
628 bf-- cr5_eq,hrmPerm ; This one can't be removed...
629
630;
631; We have an exclusive lock on the mapping chain. And we
632; also have the busy count bumped in the mapping so it can
633; not vanish on us.
634;
635
636hrmGotX: mr r3,r31 ; Get the mapping
637 bl mapBumpBusy ; Bump up the busy count
638
639;
640; Invalidate any PTEs associated with this
641; mapping (more than one if a block) and accumulate the reference
642; and change bits.
643;
644; Here is also where we need to split 32- and 64-bit processing
645;
646
647 lwz r21,mpPte(r31) ; Grab the offset to the PTE
648 rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
649 mfsdr1 r29 ; Get the hash table base and size
650
651 rlwinm r0,r20,0,mpType ; Isolate mapping type
652 cmplwi cr5,r0,mpBlock ; Remember whether this is a block mapping
653 cmplwi r0,mpMinSpecial ; cr0_lt <- not a special mapping type
654
655 rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
656 ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
657 cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
658 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
659 crorc cr0_eq,cr1_eq,cr0_lt ; No need to look at PTE if none or a special mapping
660 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
661 andc r29,r29,r2 ; Clean up hash table base
662 li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
663 mr r30,r23 ; Move the now merged vaddr to the correct register
664 add r26,r29,r21 ; Point to the PTEG slot
665
666 bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...
667
668 rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
669 beq- cr5,hrmBlock32 ; Go treat block specially...
670 subfic r9,r9,-4 ; Get the PCA entry offset
671 bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
672 add r7,r9,r29 ; Point to the PCA slot
673
674 bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)
675
676 lwz r21,mpPte(r31) ; Get the quick pointer again
677 lwz r5,0(r26) ; Get the top of PTE
678
679 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
680 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
681 rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
682 stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
683 beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...
684
685 stw r5,0(r26) ; Invalidate the PTE
686
687 li r9,tlbieLock ; Get the TLBIE lock
688
689 sync ; Make sure the invalid PTE is actually in memory
690
691hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
692 mr. r5,r5 ; Is it locked?
693 li r5,1 ; Get locked indicator
694 bne- hrmPtlb32 ; It is locked, go spin...
695 stwcx. r5,0,r9 ; Try to get it
696 bne- hrmPtlb32 ; We was beat...
697
698 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
699
700 tlbie r30 ; Invalidate all corresponding TLB entries
701
702 beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....
703
704 eieio ; Make sure that the tlbie happens first
705 tlbsync ; Wait for everyone to catch up
706 sync ; Make sure of it all
707
708hrmNTlbs: li r0,0 ; Clear this
709 rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
710 stw r0,tlbieLock(0) ; Clear the tlbie lock
711 lis r0,0x8000 ; Get bit for slot 0
712 eieio ; Make sure those RC bit have been stashed in PTE
713
714 srw r0,r0,r2 ; Get the allocation hash mask
715 lwz r22,4(r26) ; Get the latest reference and change bits
716 or r6,r6,r0 ; Show that this slot is free
717
718hrmUlckPCA32:
719 eieio ; Make sure all updates come first
720 stw r6,0(r7) ; Unlock the PTEG
721
722;
723; Now, it is time to remove the mapping and unlock the chain.
724; But first, we need to make sure no one else is using this
725; mapping so we drain the busy now
726;
727
728hrmPysDQ32: mr r3,r31 ; Point to the mapping
729 bl mapDrainBusy ; Go wait until mapping is unused
730
731 mr r3,r28 ; Get the pmap to remove from
732 mr r4,r31 ; Point to the mapping
733 bl EXT(mapRemove) ; Remove the mapping from the list
734
735 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
736 rlwinm r0,r20,0,mpType ; Isolate mapping type
737 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
738 la r3,pmapSXlk(r28) ; Point to the pmap search lock
739 subi r4,r4,1 ; Drop down the mapped page count
740 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
741 bl sxlkUnlock ; Unlock the search list
742
743 bf-- cr1_lt,hrmRetn32 ; This one has no real memory associated with it so we are done...
744
745 bl mapPhysFindLock ; Go find and lock the physent
746
747 lwz r9,ppLink+4(r3) ; Get first mapping
748
749 mr r4,r22 ; Get the RC bits we just got
750 bl mapPhysMerge ; Go merge the RC bits
751
752 rlwinm r9,r9,0,~ppFlags ; Clear the flags from the mapping pointer
753
754 cmplw r9,r31 ; Are we the first on the list?
755 bne- hrmNot1st ; Nope...
756
757 li r9,0 ; Get a 0
758 lwz r4,mpAlias+4(r31) ; Get our new forward pointer
759 stw r9,mpAlias+4(r31) ; Make sure we are off the chain
760 bl mapPhyCSet32 ; Go set the physent link and preserve flags
761
762 b hrmPhyDQd ; Join up and unlock it all...
763
764 .align 5
765
766hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
767 and r8,r8,r31 ; Get back to a page
768 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
769
770 la r3,pmapSXlk(r28) ; Point to the pmap search lock
771 bl sxlkUnlock ; Unlock the search list
772
773 xor r3,r31,r8 ; Flip mapping address to virtual
774 ori r3,r3,mapRtPerm ; Set permanent mapping error
775 b hrmErRtn
776
777hrmBadLock: li r3,mapRtBadLk ; Set bad lock
778 b hrmErRtn
779
780hrmEndInSight:
781 la r3,pmapSXlk(r28) ; Point to the pmap search lock
782 bl sxlkUnlock ; Unlock the search list
783
784hrmDoneChunk:
785 mr r3,r31 ; Point to the mapping
786 bl mapDropBusy ; Drop the busy here since we need to come back
787 li r3,mapRtRemove ; Say we are still removing this
788 b hrmErRtn
789
790 .align 5
791
792hrmNotFound:
793 la r3,pmapSXlk(r28) ; Point to the pmap search lock
794 bl sxlkUnlock ; Unlock the search list
795 li r3,mapRtNotFnd ; No mapping found
796
797 hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)
798
799 mtmsr r17 ; Restore enables/translation/etc.
800 isync
801 b hrmRetnCmn ; Join the common return code...
802
803hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
804 isync
805 b hrmRetnCmn ; Join the common return code...
806
807 .align 5
808
809hrmNot1st: mr. r8,r9 ; Remember and test current node
810 beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
811 lwz r9,mpAlias+4(r9) ; Chain to the next
812 cmplw r9,r31 ; Is this us?
813 bne- hrmNot1st ; Not us...
814
815 lwz r9,mpAlias+4(r9) ; Get our forward pointer
816 stw r9,mpAlias+4(r8) ; Unchain us
817
818 nop ; For alignment
819
820hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain
821
822hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
823 mr r3,r31 ; Copy the pointer to the mapping
824 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
825 bl mapDrainBusy ; Go wait until mapping is unused
826
827 xor r3,r31,r8 ; Flip mapping address to virtual
828
829 mtmsr r17 ; Restore enables/translation/etc.
830 isync
831
832hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
833 lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
834 lwz r17,FM_ARG0+0x08(r1) ; Restore a register
835 lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
836 mr. r6,r6 ; Should we pass back the "next" vaddr?
837 lwz r19,FM_ARG0+0x10(r1) ; Restore a register
838 lwz r20,FM_ARG0+0x14(r1) ; Restore a register
839 mtlr r0 ; Restore the return
840
841 rlwinm r16,r16,0,0,19 ; Clean to a page boundary
842 beq hrmNoNextAdr ; Do not pass back the next vaddr...
843 stw r15,0(r6) ; Pass back the top of the next vaddr
844 stw r16,4(r6) ; Pass back the bottom of the next vaddr
845
846hrmNoNextAdr:
847 lwz r15,FM_ARG0+0x00(r1) ; Restore a register
848 lwz r16,FM_ARG0+0x04(r1) ; Restore a register
849 lwz r21,FM_ARG0+0x18(r1) ; Restore a register
850 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
851 lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
852 lwz r23,FM_ARG0+0x20(r1) ; Restore a register
853 lwz r24,FM_ARG0+0x24(r1) ; Restore a register
854 lwz r25,FM_ARG0+0x28(r1) ; Restore a register
855 lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
856 lwz r27,FM_ARG0+0x30(r1) ; Restore a register
857 lwz r28,FM_ARG0+0x34(r1) ; Restore a register
858 lwz r29,FM_ARG0+0x38(r1) ; Restore a register
859 lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
860 lwz r31,FM_ARG0+0x40(r1) ; Restore a register
861 lwz r1,0(r1) ; Pop the stack
862 blr ; Leave...
863
864;
865; Here is where we come when all is lost. Somehow, we failed a mapping function
866; that must work... All hope is gone. Alas, we die.......
867;
868
869hrmPanic: lis r0,hi16(Choke) ; System abend
870 ori r0,r0,lo16(Choke) ; System abend
871 li r3,failMapping ; Show that we failed some kind of mapping thing
872 sc
873
874
875;
876; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
877; in the range. Then, if we did not finish, return a code indicating that we need to
878; be called again. Eventually, we will finish and then, we will do a TLBIE for each
879; PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
880;
881; A potential speed up is that we stop the invalidate loop once we have walked through
882; the hash table once. This really is not worth the trouble because we need to have
883; mapped 1/2 of physical RAM in an individual block. Way unlikely.
884;
885; We should rethink this and see if we think it will be faster to check PTE and
886; only invalidate the specific PTE rather than all block map PTEs in the PTEG.
887;
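;
; Added note (not in the original source): the branchless sequence below
; computes the size of this invalidation pass, roughly (placeholder names):
;
;     uint32_t left = lastPage - cursor;                     /* zero-based pages left */
;     uint32_t n    = (left < mapRemChunk) ? left : (mapRemChunk - 1);
;
; with cr7 remembering whether further chunks remain after this one.
;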
888
889 .align 5
890
891hrmBlock32: lis r29,0xD000 ; Get shift to 32MB bsu
892 rlwinm r24,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
893 lhz r25,mpBSize(r31) ; Get the number of pages in block
894 lhz r23,mpSpace(r31) ; Get the address space hash
895 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
896 rlwnm r29,r29,r24,28,31 ; Rotate to get 0 or 13
897 addi r25,r25,1 ; Account for zero-based counting
898 ori r0,r20,mpRIP ; Turn on the remove in progress flag
899 slw r25,r25,r29 ; Adjust for 32MB if needed
900 mfsdr1 r29 ; Get the hash table base and size
901 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
902 subi r25,r25,1 ; Convert back to zero-based counting
903 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
904 sub r4,r25,r9 ; Get number of pages left
905 cmplw cr1,r9,r25 ; Have we already hit the end?
906 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
907 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
908 rlwinm r26,r29,16,7,15 ; Get the hash table size
909 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
910 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
911 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
912 cmpwi cr7,r2,0 ; Remember if we have finished
913 slwi r0,r9,12 ; Make cursor into page offset
914 or r24,r24,r23 ; Get full hash
915 and r4,r4,r2 ; If more than a chunk, bring this back to 0
916 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
917 add r27,r27,r0 ; Adjust vaddr to start of current chunk
918 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
919
920 bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...
921
922 la r3,pmapSXlk(r28) ; Point to the pmap search lock
923 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
924 bl sxlkUnlock ; Unlock the search list while we are invalidating
925
926 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
927 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
928 xor r24,r24,r8 ; Get the proper VSID
929 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
930 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
931 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
932 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
933 add r22,r22,r30 ; Get end address (in PTEG units)
934
935hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
936 xor r23,r23,r24 ; Hash it
937 and r23,r23,r26 ; Wrap it into the table
938 rlwinm r3,r23,28,4,29 ; Change to PCA offset
939 subfic r3,r3,-4 ; Get the PCA entry offset
940 add r7,r3,r29 ; Point to the PCA slot
941 cmplw cr5,r30,r22 ; Check if we reached the end of the range
942 addi r30,r30,64 ; bump to the next vaddr
943
944 bl mapLockPteg ; Lock the PTEG
945
946 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
947 add r5,r23,r29 ; Point to the PTEG
948 li r0,0 ; Set an invalid PTE value
949 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
950 mtcrf 0x80,r4 ; Set CRs to select PTE slots
951 mtcrf 0x40,r4 ; Set CRs to select PTE slots
952
953 bf 0,hrmSlot0 ; No autogen here
954 stw r0,0x00(r5) ; Invalidate PTE
955
956hrmSlot0: bf 1,hrmSlot1 ; No autogen here
957 stw r0,0x08(r5) ; Invalidate PTE
958
959hrmSlot1: bf 2,hrmSlot2 ; No autogen here
960 stw r0,0x10(r5) ; Invalidate PTE
961
962hrmSlot2: bf 3,hrmSlot3 ; No autogen here
963 stw r0,0x18(r5) ; Invalidate PTE
964
965hrmSlot3: bf 4,hrmSlot4 ; No autogen here
966 stw r0,0x20(r5) ; Invalidate PTE
967
968hrmSlot4: bf 5,hrmSlot5 ; No autogen here
969 stw r0,0x28(r5) ; Invalidate PTE
970
971hrmSlot5: bf 6,hrmSlot6 ; No autogen here
972 stw r0,0x30(r5) ; Invalidate PTE
973
974hrmSlot6: bf 7,hrmSlot7 ; No autogen here
975 stw r0,0x38(r5) ; Invalidate PTE
976
977hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
978 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
979 andc r6,r6,r0 ; Turn off all the old autogen bits
980
981 hrmBNone32: eieio ; Make sure all updates come first
982
983 stw r6,0(r7) ; Unlock and set the PCA
984
985 bne+ cr5,hrmBInv32 ; Go invalidate the next...
986
987 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
988
989 mr r3,r31 ; Copy the pointer to the mapping
990 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
991
992 sync ; Make sure memory is consistent
993
994 subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
995 li r6,63 ; Assume full invalidate for now
996 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
997 andc r6,r6,r5 ; Clear max if we have less to do
998 and r5,r25,r5 ; Clear count if we have more than max
999 lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
1000 li r7,tlbieLock ; Get the TLBIE lock
1001 or r5,r5,r6 ; Get number of TLBIEs needed
1002
1003hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
1004 mr. r2,r2 ; Is it locked?
1005 li r2,1 ; Get our lock value
1006 bne- hrmBTLBlck ; It is locked, go wait...
1007 stwcx. r2,0,r7 ; Try to get it
1008 bne- hrmBTLBlck ; We was beat...
1009
1010hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
1011 tlbie r27 ; Invalidate it everywhere
1012 addi r27,r27,0x1000 ; Up to the next page
1013 bge+ hrmBTLBi ; Make sure we have done it all...
1014
1015 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
1016 li r2,0 ; Lock clear value
1017
1018 sync ; Make sure all is quiet
1019 beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....
1020
1021 eieio ; Make sure that the tlbie happens first
1022 tlbsync ; Wait for everyone to catch up
1023 sync ; Wait for quiet again
1024
1025hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock
1026
1027 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1028 bl sxlkShared ; Go get a shared lock on the mapping lists
1029 mr. r3,r3 ; Did we get the lock?
1030 bne- hrmPanic ; Nope...
1031
1032 lwz r4,mpVAddr(r31) ; High order of address
1033 lwz r5,mpVAddr+4(r31) ; Low order of address
1034 mr r3,r28 ; Pass in pmap to search
1035 mr r29,r4 ; Save this in case we need it (only promote fails)
1036 mr r30,r5 ; Save this in case we need it (only promote fails)
1037 bl EXT(mapSearchFull) ; Go see if we can find it
1038
1039 mr. r3,r3 ; Did we? (And remember mapping address for later)
1040 mr r15,r4 ; Save top of next vaddr
1041 mr r16,r5 ; Save bottom of next vaddr
1042 beq- hrmPanic ; Nope, not found...
1043
1044 cmplw r3,r31 ; Same mapping?
1045 bne- hrmPanic ; Not good...
1046
1047 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1048 bl sxlkPromote ; Try to promote shared to exclusive
1049 mr. r3,r3 ; Could we?
1050 mr r3,r31 ; Restore the mapping pointer
1051 beq+ hrmBDone1 ; Yeah...
1052
1053 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1054 bl sxlkConvert ; Convert shared to exclusive
1055 mr. r3,r3 ; Could we?
1056 bne-- hrmPanic ; Nope, we must have timed out...
1057
1058 mr r3,r28 ; Pass in pmap to search
1059 mr r4,r29 ; High order of address
1060 mr r5,r30 ; Low order of address
1061 bl EXT(mapSearchFull) ; Rescan the list
1062
1063 mr. r3,r3 ; Did we lose it when we converted?
1064 mr r15,r4 ; Save top of next vaddr
1065 mr r16,r5 ; Save bottom of next vaddr
1066 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1067
1068hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused
1069
1070 mr r3,r28 ; Get the pmap to remove from
1071 mr r4,r31 ; Point to the mapping
1072 bl EXT(mapRemove) ; Remove the mapping from the list
1073
1074 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1075 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1076 subi r4,r4,1 ; Drop down the mapped page count
1077 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1078 bl sxlkUnlock ; Unlock the search list
1079
1080 b hrmRetn32 ; We are all done, get out...
1081
1082;
1083; Here we handle the 64-bit version of hw_rem_map
1084;
1085
1086 .align 5
1087
1088hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
1089 beq-- cr5,hrmBlock64 ; Go treat block specially...
1090 subfic r9,r9,-4 ; Get the PCA entry offset
1091 bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
1092 add r7,r9,r29 ; Point to the PCA slot
1093
1094 bl mapLockPteg ; Go lock up the PTEG
1095
1096 lwz r21,mpPte(r31) ; Get the quick pointer again
1097 ld r5,0(r26) ; Get the top of PTE
1098
1099 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
1100 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
1101 sldi r23,r5,16 ; Shift AVPN up to EA format
1102 // **** Need to adjust above shift based on the page size - large pages need to shift a bit more
1103 rldicr r5,r5,0,62 ; Clear the valid bit
1104 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1105 stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
1106 beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...
1107
1108 std r5,0(r26) ; Invalidate the PTE
1109
1110 li r9,tlbieLock ; Get the TLBIE lock
1111
1112 sync ; Make sure the invalid PTE is actually in memory
1113
1114hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
1115 rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
1116 mr. r5,r5 ; Is it locked?
1117 li r5,1 ; Get locked indicator
1118 bne-- hrmPtlb64w ; It is locked, go spin...
1119 stwcx. r5,0,r9 ; Try to get it
1120 bne-- hrmPtlb64 ; We was beat...
1121
1122 tlbie r23 ; Invalidate all corresponding TLB entries
1123
1124 eieio ; Make sure that the tlbie happens first
1125 tlbsync ; Wait for everyone to catch up
1126
1127 ptesync ; Make sure of it all
1128 li r0,0 ; Clear this
1129 rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
1130 stw r0,tlbieLock(0) ; Clear the tlbie lock
1131 oris r0,r0,0x8000 ; Assume slot 0
1132
1133 srw r0,r0,r2 ; Get slot mask to deallocate
1134
1135 lwz r22,12(r26) ; Get the latest reference and change bits
1136 or r6,r6,r0 ; Make the guy we killed free
1137
1138hrmUlckPCA64:
1139 eieio ; Make sure all updates come first
1140
1141 stw r6,0(r7) ; Unlock and change the PCA
1142
1143hrmPysDQ64: mr r3,r31 ; Point to the mapping
1144 bl mapDrainBusy ; Go wait until mapping is unused
1145
1146 mr r3,r28 ; Get the pmap to remove from
1147 mr r4,r31 ; Point to the mapping
1148 bl EXT(mapRemove) ; Remove the mapping from the list
1149
1150 rlwinm r0,r20,0,mpType ; Isolate mapping type
1151 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
1152 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1153 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1154 subi r4,r4,1 ; Drop down the mapped page count
1155 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1156 bl sxlkUnlock ; Unlock the search list
1157
1158 bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done...
1159
1160 bl mapPhysFindLock ; Go find and lock the physent
1161
1162 li r0,ppLFAmask ; Get mask to clean up mapping pointer
1163 ld r9,ppLink(r3) ; Get first mapping
1164 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
1165 mr r4,r22 ; Get the RC bits we just got
1166
1167 bl mapPhysMerge ; Go merge the RC bits
1168
1169 andc r9,r9,r0 ; Clean up the mapping pointer
1170
1171 cmpld r9,r31 ; Are we the first on the list?
1172 bne-- hrmNot1st64 ; Nope...
1173
1174 li r9,0 ; Get a 0
1175 ld r4,mpAlias(r31) ; Get our forward pointer
1176
1177 std r9,mpAlias(r31) ; Make sure we are off the chain
1178 bl mapPhyCSet64 ; Go set the physent link and preserve flags
1179
1180 b hrmPhyDQd64 ; Join up and unlock it all...
1181
1182hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1183 stwcx. r5,0,r5 ; Clear the pending reservation
1184
1185
1186hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1187 mr. r5,r5 ; is it locked?
1188 beq++ hrmPtlb64 ; Nope...
1189 b hrmPtlb64x ; Sniff some more...
1190
1191 .align 5
1192
1193hrmNot1st64:
1194 mr. r8,r9 ; Remember and test current node
1195 beq-- hrmPhyDQd64 ; Could not find our node...
1196 ld r9,mpAlias(r9) ; Chain to the next
1197 cmpld r9,r31 ; Is this us?
1198 bne-- hrmNot1st64 ; Not us...
1199
1200 ld r9,mpAlias(r9) ; Get our forward pointer
1201 std r9,mpAlias(r8) ; Unchain us
1202
1203 nop ; For alignment
1204
1205hrmPhyDQd64:
1206 bl mapPhysUnlock ; Unlock the physent chain
1207
1208hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1209 mr r3,r31 ; Copy the pointer to the mapping
1210 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1211 bl mapDrainBusy ; Go wait until mapping is unused
1212
1213 xor r3,r31,r8 ; Flip mapping address to virtual
1214
1215 mtmsrd r17 ; Restore enables/translation/etc.
1216 isync
1217
1218 b hrmRetnCmn ; Join the common return path...
1219
1220
1221;
1222; Check hrmBlock32 for comments.
1223;
1224
1225 .align 5
1226
1227hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1228 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
1229 lhz r24,mpSpace(r31) ; Get the address space hash
1230 lhz r25,mpBSize(r31) ; Get the number of pages in block
1231 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
1232 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1233 addi r25,r25,1 ; Account for zero-based counting
1234 ori r0,r20,mpRIP ; Turn on the remove in progress flag
1235 slw r25,r25,r29 ; Adjust for 32MB if needed
1236 mfsdr1 r29 ; Get the hash table base and size
1237 ld r27,mpVAddr(r31) ; Get the base vaddr
1238 subi r25,r25,1 ; Convert back to zero-based counting
1239 rlwinm r5,r29,0,27,31 ; Isolate the size
1240 sub r4,r25,r9 ; Get number of pages left
1241 cmplw cr1,r9,r25 ; Have we already hit the end?
1242 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1243 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1244 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1245 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1246 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1247 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1248 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1249 srdi r27,r27,12 ; Change address into page index
1250 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1251 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1252
1253 bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...
1254
1255 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1256 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1257 bl sxlkUnlock ; Unlock the search list while we are invalidating
1258
1259 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1260 eqv r26,r26,r26 ; Get all foxes here
1261 rldimi r24,r24,28,8 ; Make a couple copies up higher
1262 rldicr r29,r29,0,47 ; Isolate just the hash table base
1263 subfic r5,r5,46 ; Get number of leading zeros
1264 srd r26,r26,r5 ; Shift the size bits over
1265 mr r30,r27 ; Get start of chunk to invalidate
1266 rldicr r26,r26,0,56 ; Make length in PTEG units
1267 add r22,r4,r30 ; Get end page number
1268
1269hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1270 rldicr r0,r0,0,49 ; Clean all but segment portion
1271 rlwinm r2,r30,0,16,31 ; Get the current page index
1272 xor r0,r0,r24 ; Form VSID
1273 xor r8,r2,r0 ; Hash the vaddr
1274 sldi r8,r8,7 ; Make into PTEG offset
1275 and r23,r8,r26 ; Wrap into the hash table
1276 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1277 subfic r3,r3,-4 ; Get the PCA entry offset
1278 add r7,r3,r29 ; Point to the PCA slot
1279
1280 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1281
1282 bl mapLockPteg ; Lock the PTEG
1283
1284 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1285 add r5,r23,r29 ; Point to the PTEG
1286 li r0,0 ; Set an invalid PTE value
1287 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1288 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1289 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1290
1291
1292 bf 0,hrmSlot0s ; No autogen here
1293 std r0,0x00(r5) ; Invalidate PTE
1294
1295hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1296 std r0,0x10(r5) ; Invalidate PTE
1297
1298hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1299 std r0,0x20(r5) ; Invalidate PTE
1300
1301hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1302 std r0,0x30(r5) ; Invalidate PTE
1303
1304hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1305 std r0,0x40(r5) ; Invalidate PTE
1306
1307hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1308 std r0,0x50(r5) ; Invalidate PTE
1309
1310hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1311 std r0,0x60(r5) ; Invalidate PTE
1312
1313hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1314 std r0,0x70(r5) ; Invalidate PTE
1315
1316hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
1317 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
1318 andc r6,r6,r0 ; Turn off all the old autogen bits
1319
1320hrmBNone64: eieio ; Make sure all updates come first
1321 stw r6,0(r7) ; Unlock and set the PCA
1322
1323 addi r30,r30,1 ; bump to the next PTEG
1324 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1325
1326 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1327
1328 mr r3,r31 ; Copy the pointer to the mapping
1329 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1330
1331 sync ; Make sure memory is consistent
1332
1333 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1334 li r6,255 ; Assume full invalidate for now
1335 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1336 andc r6,r6,r5 ; Clear max if we have less to do
1337 and r5,r25,r5 ; Clear count if we have more than max
1338 sldi r24,r24,28 ; Get the full XOR value over to segment position
1339 ld r27,mpVAddr(r31) ; Get the base vaddr
1340 li r7,tlbieLock ; Get the TLBIE lock
1341 or r5,r5,r6 ; Get number of TLBIEs needed
1342
1343hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1344 mr. r2,r2 ; Is it locked?
1345 li r2,1 ; Get our lock value
1346 bne-- hrmBTLBlcm ; It is locked, go wait...
1347 stwcx. r2,0,r7 ; Try to get it
1348 bne-- hrmBTLBlcl ; We was beat...
1349
1350hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1351 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1352 addic. r5,r5,-1 ; See if we did them all
1353 xor r2,r2,r24 ; Make the VSID
1354 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1355 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1356
1357 tlbie r2 ; Invalidate it everywhere
1358 addi r27,r27,0x1000 ; Up to the next page
1359 bge++ hrmBTLBj ; Make sure we have done it all...
1360
1361 eieio ; Make sure that the tlbie happens first
1362 tlbsync ; wait for everyone to catch up
1363
1364 li r2,0 ; Lock clear value
1365
1366 ptesync ; Wait for quiet again
1367
1368 stw r2,tlbieLock(0) ; Clear the tlbie lock
1369
1370 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1371 bl sxlkShared ; Go get a shared lock on the mapping lists
1372 mr. r3,r3 ; Did we get the lock?
1373 bne- hrmPanic ; Nope...
1374
1375 lwz r4,mpVAddr(r31) ; High order of address
1376 lwz r5,mpVAddr+4(r31) ; Low order of address
1377 mr r3,r28 ; Pass in pmap to search
1378 mr r29,r4 ; Save this in case we need it (only promote fails)
1379 mr r30,r5 ; Save this in case we need it (only promote fails)
1380 bl EXT(mapSearchFull) ; Go see if we can find it
1381
1382 mr. r3,r3 ; Did we? (And remember mapping address for later)
1383 mr r15,r4 ; Save top of next vaddr
1384 mr r16,r5 ; Save bottom of next vaddr
1385 beq- hrmPanic ; Nope, not found...
1386
1387 cmpld r3,r31 ; Same mapping?
1388 bne- hrmPanic ; Not good...
1389
1390 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1391 bl sxlkPromote ; Try to promote shared to exclusive
1392 mr. r3,r3 ; Could we?
1393 mr r3,r31 ; Restore the mapping pointer
1394 beq+ hrmBDone2 ; Yeah...
1395
1396 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1397 bl sxlkConvert ; Convert shared to exclusive
1398 mr. r3,r3 ; Could we?
1399 bne-- hrmPanic ; Nope, we must have timed out...
1400
1401 mr r3,r28 ; Pass in pmap to search
1402 mr r4,r29 ; High order of address
1403 mr r5,r30 ; Low order of address
1404 bl EXT(mapSearchFull) ; Rescan the list
1405
1406 mr. r3,r3 ; Did we lose it when we converted?
1407 mr r15,r4 ; Save top of next vaddr
1408 mr r16,r5 ; Save bottom of next vaddr
1409 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1410
1411hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1412
1413 mr r3,r28 ; Get the pmap to remove from
1414 mr r4,r31 ; Point to the mapping
1415 bl EXT(mapRemove) ; Remove the mapping from the list
1416
1417 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1418 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1419 subi r4,r4,1 ; Drop down the mapped page count
1420 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1421 bl sxlkUnlock ; Unlock the search list
1422
1423 b hrmRetn64 ; We are all done, get out...
1424
1425hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1426 stwcx. r2,0,r2 ; Unreserve it
1427
1428hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1429 mr. r2,r2 ; Is it held?
1430 beq++ hrmBTLBlcl ; Nope...
1431 b hrmBTLBlcn ; Yeah...
1c79356b 1432
91447636
A
1433;
1434; Guest shadow assist -- mapping remove
1435;
1436; Method of operation:
1437; o Locate the VMM extension block and the host pmap
1438; o Obtain the host pmap's search lock exclusively
1439; o Locate the requested mapping in the shadow hash table,
1440; exit if not found
1441; o If connected, disconnect the PTE and gather R&C to physent
1442; o Locate and lock the physent
1443; o Remove mapping from physent's chain
1444; o Unlock physent
1445; o Unlock pmap's search lock
1446;
1447; Non-volatile registers on entry:
1448; r17: caller's msr image
1449; r19: sprg2 (feature flags)
1450; r28: guest pmap's physical address
1451; r29: high-order 32 bits of guest virtual address
1452; r30: low-order 32 bits of guest virtual address
1453;
1454; Non-volatile register usage:
1455; r26: VMM extension block's physical address
1456; r27: host pmap's physical address
1457; r28: guest pmap's physical address
1458; r29: physent's physical address
1459; r30: guest virtual address
1460; r31: guest mapping's physical address
1461;
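;
; A rough C sketch of the hash probe described above (all names here are
; illustrative placeholders, not the kernel's actual C declarations):
;
;     hash  = space_id ^ (uint32_t)(guest_va >> 12);        // shadow hash
;     group = shadow_group_for(hash);                       // hash page, then group within it
;     for (slot = group; slot < group + GV_SLOTS; slot++) { // scan the slots in this group
;         if (!(slot->mpFlags & mpgFree) &&
;             slot->mpSpace == space_id &&
;             (slot->mpVAddr & ~0xFFFULL) == guest_va)
;             return slot;                                  // hit: this is the guest mapping
;     }
;     return NULL;                                          // miss: mapping not present
;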
1462 .align 5
1463hrmGuest:
1464 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1465 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1466 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1467 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1468 b hrmGStart ; Join common code
1469
1470hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1471 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1472 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1473
1474hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1475 bl sxlkExclusive ; Get lock exclusive
1476
1477 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1478
1479 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1480 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1481 srwi r11,r30,12 ; Form shadow hash:
1482 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1483 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1484 ; Form index offset from hash page number
1485 add r31,r31,r12 ; r31 <- hash page index entry
1486 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1487 mtctr r0 ; in this group
1488 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1489 lwz r31,4(r31) ; r31 <- hash page paddr
1490 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1491 ; r31 <- hash group paddr
1492
1493 addi r3,r3,1 ; Increment remove request count
1494 stw r3,vxsGrm(r26) ; Update remove request count
1495
1496 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1497 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1498 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1499 b hrmG32SrchLp ; Let the search begin!
1500
1501 .align 5
1502hrmG32SrchLp:
1503 mr r6,r3 ; r6 <- current mapping slot's flags
1504 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1505 mr r7,r4 ; r7 <- current mapping slot's space ID
1506 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1507 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1508 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1509 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1510 xor r7,r7,r9 ; Compare space ID
1511 or r0,r11,r7 ; r0 <- !(free && space match)
1512 xor r8,r8,r30 ; Compare virtual address
1513 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1514 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1515
1516 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1517 bdnz hrmG32SrchLp ; Iterate
1518
1519 mr r6,r3 ; r6 <- current mapping slot's flags
1520 clrrwi r5,r5,12 ; Remove flags from virtual address
1521 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1522 xor r4,r4,r9 ; Compare space ID
1523 or r0,r11,r4 ; r0 <- !(free && space match)
1524 xor r5,r5,r30 ; Compare virtual address
1525 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1526 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1527 b hrmGSrchMiss ; No joy in our hash group
1528
1529hrmG64Search:
1530 ld r31,0(r31) ; r31 <- hash page paddr
1531 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1532 ; r31 <- hash group paddr
1533 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1534 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1535 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1536 b hrmG64SrchLp ; Let the search begin!
1537
1538 .align 5
1539hrmG64SrchLp:
1540 mr r6,r3 ; r6 <- current mapping slot's flags
1541 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1542 mr r7,r4 ; r7 <- current mapping slot's space ID
1543 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1544 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1545 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1546 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1547 xor r7,r7,r9 ; Compare space ID
1548 or r0,r11,r7 ; r0 <- !(free && space match)
1549 xor r8,r8,r30 ; Compare virtual address
1550 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1551 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1552
1553 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1554 bdnz hrmG64SrchLp ; Iterate
1555
1556 mr r6,r3 ; r6 <- current mapping slot's flags
1557 clrrdi r5,r5,12 ; Remove flags from virtual address
1558 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1559 xor r4,r4,r9 ; Compare space ID
1560 or r0,r11,r4 ; r0 <- !(free && space match)
1561 xor r5,r5,r30 ; Compare virtual address
1562 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1563 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1564hrmGSrchMiss:
1565 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1566 li r25,mapRtNotFnd ; Return not found
1567 addi r3,r3,1 ; Increment miss count
1568 stw r3,vxsGrmMiss(r26) ; Update miss count
1569 b hrmGReturn ; Join guest return
1570
1571 .align 5
1572hrmGSrchHit:
1573 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1574 bne hrmGDormant ; Yes, nothing to disconnect
1575
1576 lwz r3,vxsGrmActive(r26) ; Get active hit count
1577 addi r3,r3,1 ; Increment active hit count
1578 stw r3,vxsGrmActive(r26) ; Update hit count
1579
1580 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1581 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1582 ; r31 <- mapping's physical address
1583 ; r3 -> PTE slot physical address
1584 ; r4 -> High-order 32 bits of PTE
1585 ; r5 -> Low-order 32 bits of PTE
1586 ; r6 -> PCA
1587 ; r7 -> PCA physical address
1588 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1589 b hrmGFreePTE ; Join 64-bit path to release the PTE
1590hrmGDscon64:
1591 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1592 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1593hrmGFreePTE:
1594 mr. r3,r3 ; Was there a valid PTE?
1595 beq hrmGDormant ; No valid PTE, we're almost done
1596 lis r0,0x8000 ; Prepare free bit for this slot
1597 srw r0,r0,r2 ; Position free bit
1598 or r6,r6,r0 ; Set it in our PCA image
1599 lwz r8,mpPte(r31) ; Get PTE offset
1600 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1601 stw r8,mpPte(r31) ; Save invalidated PTE offset
1602 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1603 stw r6,0(r7) ; Update PCA and unlock the PTEG
1604
1605hrmGDormant:
1606 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1607 bl mapFindLockPN ; Find 'n' lock this page's physent
1608 mr. r29,r3 ; Got lock on our physent?
1609 beq-- hrmGBadPLock ; No, time to bail out
1610
1611 crset cr1_eq ; cr1_eq <- previous link is the anchor
1612 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1613 la r11,ppLink+4(r29) ; Point to chain anchor
1614 lwz r9,ppLink+4(r29) ; Get chain anchor
1615 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1616hrmGRemLoop:
1617 beq- hrmGPEMissMiss ; End of chain, this is not good
1618 cmplw r9,r31 ; Is this the mapping to remove?
1619 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1620 bne hrmGRemNext ; No, chain onward
1621 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1622 stw r8,0(r11) ; Unchain gpv->phys mapping
1623 b hrmGDelete ; Finish deleting mapping
1624hrmGRemRetry:
1625 lwarx r0,0,r11 ; Get previous link
1626 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1627 stwcx. r0,0,r11 ; Update previous link
1628 bne- hrmGRemRetry ; Lost reservation, retry
1629 b hrmGDelete ; Finish deleting mapping
1630
1631hrmGRemNext:
1632 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1633 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1634 mr. r9,r8 ; Does next entry exist?
1635 b hrmGRemLoop ; Carry on
1636
1637hrmGRemove64:
1638 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1639 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1640 la r11,ppLink(r29) ; Point to chain anchor
1641 ld r9,ppLink(r29) ; Get chain anchor
1642 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1643hrmGRem64Lp:
1644 beq-- hrmGPEMissMiss ; End of chain, this is not good
1645 cmpld r9,r31 ; Is this the mapping to remove?
1646 ld r8,mpAlias(r9) ; Get forward chain pointer
1647 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1648 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1649 std r8,0(r11) ; Unchain gpv->phys mapping
1650 b hrmGDelete ; Finish deleting mapping
1651hrmGRem64Rt:
1652 ldarx r0,0,r11 ; Get previous link
1653 and r0,r0,r7 ; Get flags
1654 or r0,r0,r8 ; Insert new forward pointer
1655 stdcx. r0,0,r11 ; Slam it back in
1656 bne-- hrmGRem64Rt ; Lost reservation, retry
1657 b hrmGDelete ; Finish deleting mapping
1658
1659 .align 5
1660hrmGRem64Nxt:
1661 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1662 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1663 mr. r9,r8 ; Does next entry exist?
1664 b hrmGRem64Lp ; Carry on
1665
1666hrmGDelete:
1667 mr r3,r29 ; r3 <- physent addr
1668 bl mapPhysUnlock ; Unlock physent chain
1669 lwz r3,mpFlags(r31) ; Get mapping's flags
1670 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1671 ori r3,r3,mpgFree ; Mark mapping free
1672 stw r3,mpFlags(r31) ; Update flags
1673 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1674
1675hrmGReturn:
1676 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1677 bl sxlkUnlock ; Release host pmap search lock
1678
1679 mr r3,r25 ; r3 <- return code
1680 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1681 mtmsr r17 ; Restore 'rupts, translation
1682 isync ; Throw a small wrench into the pipeline
1683 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1684hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1685 b hrmRetnCmn ; Join common return
1686
1687hrmGBadPLock:
1688hrmGPEMissMiss:
1689 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1690 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1691 li r3,failMapping ; All the way from New Orleans
1692 sc ; To Jerusalem
1c79356b
A
1693
1694
1695/*
55e303ae 1696 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1c79356b 1697 *
55e303ae 1698 * Upon entry, R3 contains a pointer to a physent.
1c79356b 1699 *
55e303ae
A
1700 * This function removes the first mapping from a physical entry
1701 * alias list. It locks the list, extracts the vaddr and pmap from
1702 * the first entry. It then jumps into the hw_rem_map function.
1703 * NOTE: since we jump into rem_map, we need to set up the stack
1704 * identically. Also, we set the next parm to 0 so we do not
1705 * try to save a next vaddr.
1706 *
1707 * We return the virtual address of the removed mapping in
1708 * R3.
de355530 1709 *
55e303ae 1710 * Note that this is designed to be called from 32-bit mode with a stack.
de355530 1711 *
55e303ae
A
1712 * We disable translation and all interruptions here. This keeps us
1713 * from having to worry about a deadlock due to having anything locked
1714 * and needing it to process a fault.
1c79356b 1715 *
55e303ae
A
1716 * Note that this must be done with both interruptions off and VM off
1717 *
1718 *
1719 * Remove mapping via physical page (mapping_purge)
1720 *
1721 * 1) lock physent
1722 * 2) extract vaddr and pmap
1723 * 3) unlock physent
1724 * 4) do "remove mapping via pmap"
1725 *
1c79356b 1726 *
1c79356b
A
1727 */
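/*
 * Roughly, in C (the names below are illustrative placeholders, not the
 * kernel's real C interfaces; they follow the steps listed above):
 *
 *     mapPhysLock(pp);                           // 1) lock physent
 *     mp = first_mapping_on_chain(pp);           //    head of the alias chain
 *     if (mp == NULL) {
 *         mapPhysUnlock(pp);
 *         return mapRtEmpty;                     //    nothing mapped to this page
 *     }
 *     pmap = pmapTrans_lookup(mp->mpSpace);      // 2) extract pmap (via pmapTrans)
 *     va   = mp->mpVAddr;                        //    and vaddr
 *     mapPhysUnlock(pp);                         // 3) unlock physent
 *     return remove_mapping_via_pmap(pmap, va);  // 4) join the hw_rem_map path (hrmJoin)
 */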
1728
1729 .align 5
55e303ae
A
1730 .globl EXT(hw_purge_phys)
1731
1732LEXT(hw_purge_phys)
1733 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1734 mflr r0 ; Save the link register
1735 stw r15,FM_ARG0+0x00(r1) ; Save a register
1736 stw r16,FM_ARG0+0x04(r1) ; Save a register
1737 stw r17,FM_ARG0+0x08(r1) ; Save a register
1738 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1739 stw r19,FM_ARG0+0x10(r1) ; Save a register
1740 stw r20,FM_ARG0+0x14(r1) ; Save a register
1741 stw r21,FM_ARG0+0x18(r1) ; Save a register
1742 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1743 stw r23,FM_ARG0+0x20(r1) ; Save a register
1744 stw r24,FM_ARG0+0x24(r1) ; Save a register
1745 stw r25,FM_ARG0+0x28(r1) ; Save a register
1746 li r6,0 ; Set no next address return
1747 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1748 stw r27,FM_ARG0+0x30(r1) ; Save a register
1749 stw r28,FM_ARG0+0x34(r1) ; Save a register
1750 stw r29,FM_ARG0+0x38(r1) ; Save a register
1751 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1752 stw r31,FM_ARG0+0x40(r1) ; Save a register
1753 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1754 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1755
1756 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1757
1758 bl mapPhysLock ; Lock the physent
1759
1760 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1761
1762 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
91447636 1763 li r0,ppFlags ; Set the bottom stuff to clear
55e303ae
A
1764 b hppJoin ; Join the common...
1765
91447636 1766hppSF: li r0,ppLFAmask
55e303ae 1767 ld r12,ppLink(r3) ; Get the pointer to the first mapping
91447636 1768 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
1769
1770hppJoin: andc. r12,r12,r0 ; Clean and test link
1771 beq-- hppNone ; There are no more mappings on physical page
1772
1773 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1774 lhz r7,mpSpace(r12) ; Get the address space hash
1775 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
1776 slwi r0,r7,2 ; Multiply space by 4
1777 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1778 slwi r7,r7,3 ; Multiply space by 8
1779 lwz r5,mpVAddr+4(r12) ; and the bottom
1780 add r7,r7,r0 ; Get correct displacement into translate table
1781 lwz r28,0(r28) ; Get the actual translation map
de355530 1782
55e303ae
A
1783 add r28,r28,r7 ; Point to the pmap translation
1784
1785 bl mapPhysUnlock ; Time to unlock the physical entry
1786
1787 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1788
1789 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1790 b hrmJoin ; Go remove the mapping...
1791
1792hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1793 b hrmJoin ; Go remove the mapping...
d7e50217 1794
de355530 1795 .align 5
55e303ae
A
1796
1797hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1798
1799 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1800
1801 mtmsr r11 ; Restore enables/translation/etc.
1802 isync
1803 b hppRetnCmn ; Join the common return code...
1c79356b 1804
55e303ae
A
1805hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1806 isync
1c79356b 1807
55e303ae
A
1808;
1809; NOTE: we have not used any registers other than the volatiles to this point
1810;
1c79356b 1811
55e303ae 1812hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1c79356b 1813
91447636 1814 li r3,mapRtEmpty ; Physent chain is empty
55e303ae
A
1815 mtlr r12 ; Restore the return
1816 lwz r1,0(r1) ; Pop the stack
1817 blr ; Leave...
1c79356b
A
1818
1819/*
55e303ae
A
1820 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1821 *
1822 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1823 * a 64-bit quantity, it is a long long so it is in R4 and R5.
1824 *
1825 * We return the virtual address of the removed mapping in
1826 * R3.
1827 *
1828 * Note that this is designed to be called from 32-bit mode with a stack.
1829 *
1830 * We disable translation and all interruptions here. This keeps us
1831 * from having to worry about a deadlock due to having anything locked
1832 * and needing it to process a fault.
1833 *
1834 * Note that this must be done with both interruptions off and VM off
1835 *
1836 * Remove a mapping which can be reestablished by VM
1837 *
1c79356b 1838 */
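/*
 * Roughly, in C (names are illustrative placeholders, not the kernel's real
 * C interfaces):
 *
 *     sxlkExclusive(&pmap->pmapSXlk);                 // exclusive search lock
 *     for (;;) {
 *         mp = mapSearchFull(pmap, va, &next_va);     // full search, remembers predecessors
 *         if (mp != NULL && is_normal(mp) && !is_permanent(mp) && busy_count(mp) == 0)
 *             return remove_found_mapping(pmap, mp);  // joins the common removal path
 *         if (next_va == 0)
 *             break;                                  // ran off the end of the pmap
 *         va = next_va;                               // keep scanning
 *     }
 *     sxlkUnlock(&pmap->pmapSXlk);
 *     return 0;                                       // nothing removable was found
 */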
1c79356b 1839
55e303ae
A
1840 .align 5
1841 .globl EXT(hw_purge_map)
1842
1843LEXT(hw_purge_map)
1844 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1845 mflr r0 ; Save the link register
1846 stw r15,FM_ARG0+0x00(r1) ; Save a register
1847 stw r16,FM_ARG0+0x04(r1) ; Save a register
1848 stw r17,FM_ARG0+0x08(r1) ; Save a register
1849 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1850 stw r19,FM_ARG0+0x10(r1) ; Save a register
1851 mfsprg r19,2 ; Get feature flags
1852 stw r20,FM_ARG0+0x14(r1) ; Save a register
1853 stw r21,FM_ARG0+0x18(r1) ; Save a register
1854 mtcrf 0x02,r19 ; move pf64Bit cr6
1855 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1856 stw r23,FM_ARG0+0x20(r1) ; Save a register
1857 stw r24,FM_ARG0+0x24(r1) ; Save a register
1858 stw r25,FM_ARG0+0x28(r1) ; Save a register
1859 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1860 stw r27,FM_ARG0+0x30(r1) ; Save a register
1861 stw r28,FM_ARG0+0x34(r1) ; Save a register
1862 stw r29,FM_ARG0+0x38(r1) ; Save a register
1863 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1864 stw r31,FM_ARG0+0x40(r1) ; Save a register
1865 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1866 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1867
91447636
A
1868#if DEBUG
1869 lwz r11,pmapFlags(r3) ; Get pmaps flags
1870 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1871 bne hpmPanic ; Call not valid for guest shadow assist pmap
1872#endif
1873
55e303ae
A
1874 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1875 lwz r9,pmapvr+4(r3) ; Get conversion mask
1876 b hpmSF1x ; Done...
1877
1878hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1879
1880hpmSF1x:
1881 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1882
1883 xor r28,r3,r9 ; Convert the pmap to physical addressing
1884
1885 mr r17,r11 ; Save the MSR
1886
1887 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1888 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1889 mr. r3,r3 ; Did we get the lock?
1890 bne-- hrmBadLock ; Nope...
1891;
1892; Note that we do a full search (i.e., no shortcut level skips, etc.)
1893; here so that we will know the previous elements so we can dequeue them
1894; later.
1895;
1896hpmSearch:
1897 mr r3,r28 ; Pass in pmap to search
1898 mr r29,r4 ; Top half of vaddr
1899 mr r30,r5 ; Bottom half of vaddr
1900 bl EXT(mapSearchFull) ; Rescan the list
1901 mr. r31,r3 ; Did we? (And remember mapping address for later)
1902 or r0,r4,r5 ; Are we beyond the end?
1903 mr r15,r4 ; Save top of next vaddr
1904 cmplwi cr1,r0,0 ; See if there is another
1905 mr r16,r5 ; Save bottom of next vaddr
1906 bne-- hpmGotOne ; We found one, go check it out...
1907
1908hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1909 b hrmNotFound ; No more in pmap to check...
1910
1911hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
91447636 1912 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
ab86ba33
A
1913 rlwinm r21,r20,8,24,31 ; Extract the busy count
1914 cmplwi cr2,r21,0 ; Is it busy?
1915 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
55e303ae
A
1916 beq++ hrmGotX ; Found, branch to remove the mapping...
1917 b hpmCNext ; Nope...
1c79356b 1918
91447636
A
1919hpmPanic: lis r0,hi16(Choke) ; System abend
1920 ori r0,r0,lo16(Choke) ; System abend
1921 li r3,failMapping ; Show that we failed some kind of mapping thing
1922 sc
1923
55e303ae
A
1924/*
1925 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1926 *
1927 * Upon entry, R3 contains a pointer to the physent and
1928 * R4 contains a pointer to the pmap.
1929 *
1930 * This function removes the first mapping for a specific pmap from a physical entry
1931 * alias list. It locks the list, extracts the vaddr and pmap from
1932 * the first appropriate entry. It then jumps into the hw_rem_map function.
1933 * NOTE: since we jump into rem_map, we need to set up the stack
1934 * identically. Also, we set the next parm to 0 so we do not
1935 * try to save a next vaddr.
1936 *
1937 * We return the virtual address of the removed mapping in
1938 * R3.
1939 *
1940 * Note that this is designed to be called from 32-bit mode with a stack.
1941 *
1942 * We disable translation and all interruptions here. This keeps us
1943 * from having to worry about a deadlock due to having anything locked
1944 * and needing it to process a fault.
1945 *
1946 * Note that this must be done with both interruptions off and VM off
1947 *
1948 *
1949 * Remove mapping via physical page (mapping_purge)
1950 *
1951 * 1) lock physent
1952 * 2) extract vaddr and pmap
1953 * 3) unlock physent
1954 * 4) do "remove mapping via pmap"
1955 *
1956 *
1957 */
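/*
 * Roughly, in C (names are illustrative placeholders):
 *
 *     mapPhysLock(pp);                               // 1) lock physent
 *     for (mp = first_mapping_on_chain(pp); mp != NULL; mp = next_on_chain(mp)) {
 *         if (mp->mpSpace == pmap->pmapSpace) {      // 2) first mapping for this space
 *             va = mp->mpVAddr;
 *             mapPhysUnlock(pp);                     // 3) unlock physent
 *             return remove_mapping_via_pmap(pmap, va);  // 4) join the hw_rem_map path
 *         }
 *     }
 *     mapPhysUnlock(pp);
 *     return mapRtEmpty;                             // no mapping for this space on the page
 */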
1c79356b 1958
55e303ae
A
1959 .align 5
1960 .globl EXT(hw_purge_space)
1961
1962LEXT(hw_purge_space)
1963 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1964 mflr r0 ; Save the link register
1965 stw r15,FM_ARG0+0x00(r1) ; Save a register
1966 stw r16,FM_ARG0+0x04(r1) ; Save a register
1967 stw r17,FM_ARG0+0x08(r1) ; Save a register
1968 mfsprg r2,2 ; Get feature flags
1969 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1970 stw r19,FM_ARG0+0x10(r1) ; Save a register
1971 stw r20,FM_ARG0+0x14(r1) ; Save a register
1972 stw r21,FM_ARG0+0x18(r1) ; Save a register
1973 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1974 mtcrf 0x02,r2 ; move pf64Bit cr6
1975 stw r23,FM_ARG0+0x20(r1) ; Save a register
1976 stw r24,FM_ARG0+0x24(r1) ; Save a register
1977 stw r25,FM_ARG0+0x28(r1) ; Save a register
1978 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1979 stw r27,FM_ARG0+0x30(r1) ; Save a register
1980 li r6,0 ; Set no next address return
1981 stw r28,FM_ARG0+0x34(r1) ; Save a register
1982 stw r29,FM_ARG0+0x38(r1) ; Save a register
1983 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1984 stw r31,FM_ARG0+0x40(r1) ; Save a register
1985 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1986 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1987
91447636
A
1988#if DEBUG
1989 lwz r11,pmapFlags(r4) ; Get pmaps flags
1990 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1991 bne hpsPanic ; Call not valid for guest shadow assist pmap
1992#endif
1993
55e303ae
A
1994 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
1995
1996 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
1997
1998 b hpsSF1x ; Done...
1999
2000hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2001
2002hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2003
2004 xor r4,r4,r9 ; Convert the pmap to physical addressing
2005
2006 bl mapPhysLock ; Lock the physent
2007
2008 lwz r8,pmapSpace(r4) ; Get the space hash
2009
2010 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2011
2012 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2013
91447636 2014hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
55e303ae
A
2015 beq hpsNone ; Did not find one...
2016
2017 lhz r10,mpSpace(r12) ; Get the space
2018
2019 cmplw r10,r8 ; Is this one of ours?
2020 beq hpsFnd ; Yes...
2021
2022 lwz r12,mpAlias+4(r12) ; Chain on to the next
2023 b hpsSrc32 ; Check it out...
1c79356b 2024
55e303ae
A
2025 .align 5
2026
91447636 2027hpsSF: li r0,ppLFAmask
55e303ae 2028 ld r12,ppLink(r3) ; Get the pointer to the first mapping
91447636 2029 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
2030
2031hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2032 beq hpsNone ; Did not find one...
2033
2034 lhz r10,mpSpace(r12) ; Get the space
2035
2036 cmplw r10,r8 ; Is this one of ours?
2037 beq hpsFnd ; Yes...
2038
2039 ld r12,mpAlias(r12) ; Chain on to the next
2040 b hpsSrc64 ; Check it out...
2041
2042 .align 5
1c79356b 2043
55e303ae
A
2044hpsFnd: mr r28,r4 ; Set the pmap physical address
2045 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2046 lwz r5,mpVAddr+4(r12) ; and the bottom
2047
2048 bl mapPhysUnlock ; Time to unlock the physical entry
2049 b hrmJoin ; Go remove the mapping...
2050
2051 .align 5
2052
2053hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
1c79356b 2054
55e303ae 2055 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
1c79356b 2056
55e303ae
A
2057 mtmsr r11 ; Restore enables/translation/etc.
2058 isync
2059 b hpsRetnCmn ; Join the common return code...
1c79356b 2060
55e303ae
A
2061hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2062 isync
1c79356b 2063
55e303ae
A
2064;
2065; NOTE: we have not used any registers other than the volatiles to this point
2066;
d7e50217 2067
55e303ae
A
2068hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2069
91447636 2070 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
55e303ae
A
2071 mtlr r12 ; Restore the return
2072 lwz r1,0(r1) ; Pop the stack
2073 blr ; Leave...
1c79356b 2074
91447636
A
2075hpsPanic: lis r0,hi16(Choke) ; System abend
2076 ori r0,r0,lo16(Choke) ; System abend
2077 li r3,failMapping ; Show that we failed some kind of mapping thing
2078 sc
2079
2080/*
2081 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2082 * on this physent chain
2083 *
2084 * Locates the first guest mapping on the physent chain that is associated with the
2085 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2086 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2087 * repeatedly until no additional guest mappings that match our criteria are removed.
2088 *
2089 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2090 *
2091 * Parameters:
2092 * r3 : physent, 32-bit kernel virtual address
2093 * r4 : host pmap, 32-bit kernel virtual address
2094 *
2095 * Volatile register usage (for linkage through hrmJoin):
2096 * r4 : high-order 32 bits of guest virtual address
2097 * r5 : low-order 32 bits of guest virtual address
2098 * r11: saved MSR image
2099 *
2100 * Non-volatile register usage:
2101 * r26: VMM extension block's physical address
2102 * r27: host pmap's physical address
2103 * r28: guest pmap's physical address
2104 *
2105 */
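/*
 * Roughly, in C (names are illustrative placeholders):
 *
 *     mapPhysLock(pp);                                     // lock physent chain
 *     for (mp = first_mapping_on_chain(pp); mp != NULL; mp = next_on_chain(mp)) {
 *         guest = pmapTrans_lookup(mp->mpSpace);           // space id -> owning pmap
 *         if ((mp->mpFlags & mpType) == mpGuest &&         // guest mapping...
 *             guest->pmapVmmExtPhys == host_vmm_ext) {     // ...belonging to this host?
 *             mapPhysUnlock(pp);
 *             return remove_mapping_via_pmap(guest, mp->mpVAddr);  // join hrmJoin
 *         }
 *     }
 *     mapPhysUnlock(pp);
 *     return mapRtEmpty;                                   // no matching guest mapping
 */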
2106
2107 .align 5
2108 .globl EXT(hw_scrub_guest)
2109
2110LEXT(hw_scrub_guest)
2111 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2112 mflr r0 ; Save the link register
2113 stw r15,FM_ARG0+0x00(r1) ; Save a register
2114 stw r16,FM_ARG0+0x04(r1) ; Save a register
2115 stw r17,FM_ARG0+0x08(r1) ; Save a register
2116 mfsprg r2,2 ; Get feature flags
2117 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2118 stw r19,FM_ARG0+0x10(r1) ; Save a register
2119 stw r20,FM_ARG0+0x14(r1) ; Save a register
2120 stw r21,FM_ARG0+0x18(r1) ; Save a register
2121 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2122 mtcrf 0x02,r2 ; move pf64Bit cr6
2123 stw r23,FM_ARG0+0x20(r1) ; Save a register
2124 stw r24,FM_ARG0+0x24(r1) ; Save a register
2125 stw r25,FM_ARG0+0x28(r1) ; Save a register
2126 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2127 stw r27,FM_ARG0+0x30(r1) ; Save a register
2128 li r6,0 ; Set no next address return
2129 stw r28,FM_ARG0+0x34(r1) ; Save a register
2130 stw r29,FM_ARG0+0x38(r1) ; Save a register
2131 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2132 stw r31,FM_ARG0+0x40(r1) ; Save a register
2133 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2134 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2135
2136 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2137
2138 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2139 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2140 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2141 b hsgStart ; Get to work
2142
2143hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2144 ld r9,pmapvr+4(r4) ; Get 64-bit virt<->real conversion salt
2145
2146hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2147 xor r27,r4,r9 ; Convert host pmap_t virt->real
2148 bl mapPhysLock ; Lock the physent
2149
2150 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2151
2152 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2153hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2154 beq hsg32Miss ; Did not find one...
2155 lwz r8,mpFlags(r12) ; Get mapping's flags
2156 lhz r7,mpSpace(r12) ; Get mapping's space id
2157 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2158 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2159 xori r8,r8,mpGuest ; Is it a guest mapping?
2160 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
2161 slwi r9,r7,2 ; Multiply space by 4
2162 lwz r28,0(r28) ; Get the actual translation map
2163 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2164 slwi r7,r7,3 ; Multiply space by 8
2165 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2166 add r7,r7,r9 ; Get correct displacement into translate table
2167 add r28,r28,r7 ; Point to the pmap translation
2168 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2169 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2170 xor r7,r7,r26 ; Is guest associated with specified host?
2171 or. r7,r7,r8 ; Guest mapping && associated with host?
2172 lwz r12,mpAlias+4(r12) ; Chain on to the next
2173 bne hsg32Loop ; Try next mapping on alias chain
2174
2175hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2176 b hrmJoin ; Join common path for mapping removal
2177
2178 .align 5
2179hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2180 mtmsr r11 ; Restore 'rupts, translation
2181 isync ; Throw a small wrench into the pipeline
2182 li r3,mapRtEmpty ; No mappings found matching specified criteria
2183 b hrmRetnCmn ; Exit through common epilog
2184
2185 .align 5
2186hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2187 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2188 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2189hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2190 beq hsg64Miss ; Did not find one...
2191 lwz r8,mpFlags(r12) ; Get mapping's flags
2192 lhz r7,mpSpace(r12) ; Get mapping's space id
2193 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2194 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2195 xori r8,r8,mpGuest ; Is it a guest mapping?
2196 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
2197 slwi r9,r7,2 ; Multiply space by 4
2198 lwz r28,0(r28) ; Get the actual translation map
2199 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2200 slwi r7,r7,3 ; Multiply space by 8
2201 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2202 add r7,r7,r9 ; Get correct displacement into translate table
2203 add r28,r28,r7 ; Point to the pmap translation
2204 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2205 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2206 xor r7,r7,r26 ; Is guest associated with specified host?
2207 or. r7,r7,r8 ; Guest mapping && associated with host?
2208 ld r12,mpAlias(r12) ; Chain on to the next
2209 bne hsg64Loop ; Try next mapping on alias chain
2210
2211hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2212 b hrmJoin ; Join common path for mapping removal
2213
2214 .align 5
2215hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
b36670ce 2216 mtmsrd r11 ; Restore 'rupts, translation
91447636
A
2217 li r3,mapRtEmpty ; No mappings found matching specified criteria
2218 b hrmRetnCmn ; Exit through common epilog
2219
1c79356b
A
2220
2221/*
55e303ae
A
2222 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2223 *
2224 * Upon entry, R3 contains a pointer to a physent.
2225 * space is the space ID from the pmap in question
2226 *
2227 * We return the virtual address of the found mapping in
2228 * R3. Note that the mapping busy is bumped.
2229 *
2230 * Note that this is designed to be called from 32-bit mode with a stack.
2231 *
2232 * We disable translation and all interruptions here. This keeps us
2233 * from having to worry about a deadlock due to having anything locked
2234 * and needing it to process a fault.
2235 *
1c79356b
A
2236 */
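/*
 * Roughly, in C (names are illustrative placeholders):
 *
 *     mapPhysLock(pp);
 *     for (mp = first_mapping_on_chain(pp); mp != NULL; mp = next_on_chain(mp)) {
 *         if (mp->mpSpace == space) {            // first mapping for this space
 *             mapBumpBusy(mp);                   // hold it so it cannot disappear
 *             mapPhysUnlock(pp);
 *             return phys_to_virt(mp);           // hand back a kernel virtual pointer
 *         }
 *     }
 *     mapPhysUnlock(pp);
 *     return NULL;                               // no mapping for this space
 */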
2237
2238 .align 5
55e303ae
A
2239 .globl EXT(hw_find_space)
2240
2241LEXT(hw_find_space)
2242 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2243 mflr r0 ; Save the link register
2244 mr r8,r4 ; Remember the space
2245 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2246
2247 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1c79356b 2248
55e303ae 2249 bl mapPhysLock ; Lock the physent
1c79356b 2250
55e303ae
A
2251 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2252
2253 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
d7e50217 2254
91447636 2255hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
55e303ae
A
2256 beq hfsNone ; Did not find one...
2257
2258 lhz r10,mpSpace(r12) ; Get the space
2259
2260 cmplw r10,r8 ; Is this one of ours?
2261 beq hfsFnd ; Yes...
2262
2263 lwz r12,mpAlias+4(r12) ; Chain on to the next
2264 b hfsSrc32 ; Check it out...
1c79356b 2265
55e303ae
A
2266 .align 5
2267
91447636 2268hfsSF: li r0,ppLFAmask
55e303ae 2269 ld r12,ppLink(r3) ; Get the pointer to the first mapping
91447636 2270 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
2271
2272hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2273 beq hfsNone ; Did not find one...
2274
2275 lhz r10,mpSpace(r12) ; Get the space
2276
2277 cmplw r10,r8 ; Is this one of ours?
2278 beq hfsFnd ; Yes...
2279
2280 ld r12,mpAlias(r12) ; Chain on to the next
2281 b hfsSrc64 ; Check it out...
2282
2283 .align 5
2284
2285hfsFnd: mr r8,r3 ; Save the physent
2286 mr r3,r12 ; Point to the mapping
2287 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
1c79356b 2288
55e303ae
A
2289 mr r3,r8 ; Get back the physical entry
2290 li r7,0xFFF ; Get a page size mask
2291 bl mapPhysUnlock ; Time to unlock the physical entry
1c79356b 2292
55e303ae
A
2293 andc r3,r12,r7 ; Move the mapping back down to a page
2294 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2295 xor r12,r3,r12 ; Convert to virtual
2296 b hfsRet ; Time to return
2297
2298 .align 5
2299
2300hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2301
2302hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
1c79356b 2303
55e303ae
A
2304 mtmsr r11 ; Restore enables/translation/etc.
2305 isync
2306 b hfsRetnCmn ; Join the common return code...
1c79356b 2307
55e303ae
A
2308hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2309 isync
1c79356b 2310
55e303ae
A
2311;
2312; NOTE: we have not used any registers other than the volatiles to this point
2313;
1c79356b 2314
55e303ae 2315hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
91447636
A
2316
2317#if DEBUG
2318 mr. r3,r3 ; Anything to return?
2319 beq hfsRetnNull ; Nope
2320 lwz r11,mpFlags(r3) ; Get mapping flags
2321 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2322 cmplwi r0,mpGuest ; Shadow guest mapping?
2323 beq hfsPanic ; Yup, kick the bucket
2324hfsRetnNull:
2325#endif
2326
55e303ae
A
2327 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2328
2329 mtlr r12 ; Restore the return
2330 lwz r1,0(r1) ; Pop the stack
2331 blr ; Leave...
1c79356b 2332
91447636
A
2333hfsPanic: lis r0,hi16(Choke) ; System abend
2334 ori r0,r0,lo16(Choke) ; System abend
2335 li r3,failMapping ; Show that we failed some kind of mapping thing
2336 sc
1c79356b 2337
55e303ae
A
2338;
2339; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2340; Returns 0 if not found or the virtual address of the mapping if
2341; it is. Also, the mapping has its busy count bumped.
2342;
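;
; Roughly, in C (names are illustrative placeholders, not the kernel's real
; C declarations):
;
;     sxlkShared(&pmap->pmapSXlk);                   // shared search lock
;     mp = mapSearch(pmap, va, &next_va);            // look the vaddr up
;     if (mp != NULL && !(mp->mpFlags & mpRIP)) {    // found and not being removed?
;         mapBumpBusy(mp);                           // hold it across the return
;         mp = phys_to_virt(mp);
;     } else
;         mp = NULL;
;     sxlkUnlock(&pmap->pmapSXlk);
;     *nextva = next_va;                             // always report the next vaddr
;     return mp;
;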
2343 .align 5
2344 .globl EXT(hw_find_map)
1c79356b 2345
55e303ae
A
2346LEXT(hw_find_map)
2347 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2348 mflr r0 ; Save the link register
2349 stw r25,FM_ARG0+0x00(r1) ; Save a register
2350 stw r26,FM_ARG0+0x04(r1) ; Save a register
2351 mr r25,r6 ; Remember address of next va
2352 stw r27,FM_ARG0+0x08(r1) ; Save a register
2353 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2354 stw r29,FM_ARG0+0x10(r1) ; Save a register
2355 stw r30,FM_ARG0+0x14(r1) ; Save a register
2356 stw r31,FM_ARG0+0x18(r1) ; Save a register
2357 stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1c79356b 2358
91447636
A
2359#if DEBUG
2360 lwz r11,pmapFlags(r3) ; Get pmaps flags
2361 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2362 bne hfmPanic ; Call not valid for guest shadow assist pmap
2363#endif
2364
55e303ae
A
2365 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2366 lwz r7,pmapvr+4(r3) ; Get the second part
1c79356b 2367
1c79356b 2368
55e303ae
A
2369 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2370
2371 mr r27,r11 ; Remember the old MSR
2372 mr r26,r12 ; Remember the feature bits
9bccf70c 2373
55e303ae 2374 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 2375
55e303ae 2376 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
1c79356b 2377
55e303ae 2378 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
1c79356b 2379
55e303ae
A
2380hfmSF1: mr r29,r4 ; Save top half of vaddr
2381 mr r30,r5 ; Save the bottom half
2382
2383 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2384 bl sxlkShared ; Go get a shared lock on the mapping lists
2385 mr. r3,r3 ; Did we get the lock?
2386 bne-- hfmBadLock ; Nope...
1c79356b 2387
55e303ae
A
2388 mr r3,r28 ; get the pmap address
2389 mr r4,r29 ; Get bits 0:31 to look for
2390 mr r5,r30 ; Get bits 32:64
2391
2392 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
1c79356b 2393
55e303ae
A
2394 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2395 mr. r31,r3 ; Save the mapping if we found it
2396 cmplwi cr1,r0,0 ; Are we removing?
2397 mr r29,r4 ; Save next va high half
2398 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2399 mr r30,r5 ; Save next va low half
2400 li r6,0 ; Assume we did not find it
2401 li r26,0xFFF ; Get a mask to relocate to start of mapping page
1c79356b 2402
55e303ae 2403 bt-- cr0_eq,hfmNotFnd ; We did not find it...
1c79356b 2404
55e303ae 2405 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
1c79356b 2406
55e303ae 2407 andc r4,r31,r26 ; Get back to the mapping page start
1c79356b 2408
55e303ae
A
2409; Note: we can treat 32- and 64-bit the same here. Because we are going from
2410; physical to virtual and we only do 32-bit virtual, we only need the low order
2411; word of the xor.
d7e50217 2412
55e303ae
A
2413 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2414 li r6,-1 ; Indicate we found it and it is not being removed
2415 xor r31,r31,r4 ; Flip to virtual
d7e50217 2416
55e303ae
A
2417hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2418 bl sxlkUnlock ; Unlock the search list
d7e50217 2419
55e303ae
A
2420 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2421 and r3,r3,r6 ; Clear if not found or removing
de355530 2422
55e303ae 2423hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
de355530 2424
55e303ae
A
2425 mtmsr r27 ; Restore enables/translation/etc.
2426 isync
2427 b hfmReturnC ; Join common...
2428
2429hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2430 isync
2431
2432hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2433 stw r30,4(r25) ; Save the bottom of the next va
2434 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2435 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2436 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2437 and r3,r3,r6 ; Clear return if the mapping is being removed
2438 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2439 mtlr r0 ; Restore the return
2440 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2441 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2442 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2443 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2444 lwz r1,0(r1) ; Pop the stack
2445 blr ; Leave...
2446
2447 .align 5
2448
2449hfmBadLock: li r3,1 ; Set lock time out error code
2450 b hfmReturn ; Leave....
1c79356b 2451
91447636
A
2452hfmPanic: lis r0,hi16(Choke) ; System abend
2453 ori r0,r0,lo16(Choke) ; System abend
2454 li r3,failMapping ; Show that we failed some kind of mapping thing
2455 sc
2456
2457
2458/*
2459 * void hw_clear_maps(void)
2460 *
2461 * Invalidate the cached PTE pointer (mpPte) in every mapping on every phys entry.
2462 *
2463 *
2464 */
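/*
 * Roughly, in C (names are illustrative placeholders):
 *
 *     for (rgn = pmap_mem_regions; rgn->mrPhysTab != NULL; rgn++)   // each memory region
 *         for (pp = physent_at(rgn, rgn->mrStart); pp <= physent_at(rgn, rgn->mrEnd); pp++)
 *             for (mp = first_mapping_on_chain(pp); mp != NULL; mp = next_on_chain(mp))
 *                 mp->mpPte &= ~mpHValid;           // forget any cached PTE pointer
 */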
2465
2466 .align 5
2467 .globl EXT(hw_clear_maps)
2468
2469LEXT(hw_clear_maps)
2470 mflr r10 ; Save the link register
2471 mfcr r9 ; Save the condition register
2472 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2473
2474 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2475 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2476
2477hcmNextRegion:
2478 lwz r3,mrPhysTab(r5) ; Get the actual table address
2479 lwz r0,mrStart(r5) ; Get start of table entry
2480 lwz r4,mrEnd(r5) ; Get end of table entry
2481 addi r5,r5,mrSize ; Point to the next regions
2482
2483 cmplwi r3,0 ; No more regions?
2484 beq-- hcmDone ; Leave...
2485
2486 sub r4,r4,r0 ; Calculate physical entry count
2487 addi r4,r4,1
2488 mtctr r4
2489
2490 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2491
2492
2493hcmNextPhys32:
2494 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2495 addi r3,r3,physEntrySize ; Next phys_entry
2496
2497hcmNextMap32:
3a60a9f5 2498 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
91447636
A
2499 beq hcmNoMap32 ; Did not find one...
2500
2501 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2502 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2503 stw r0,mpPte(r4) ; Save the invalidated PTE pointer
2504
2505 lwz r4,mpAlias+4(r4) ; Chain on to the next
2506 b hcmNextMap32 ; Check it out...
2507hcmNoMap32:
2508 bdnz hcmNextPhys32
2509 b hcmNextRegion
2510
2511
2512 .align 5
2513hcmNextPhys64:
2514 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2515 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2516 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2517 addi r3,r3,physEntrySize ; Next phys_entry
2518
2519hcmNextMap64:
2520 andc. r4,r4,r0 ; Clean and test mapping address
2521 beq hcmNoMap64 ; Did not find one...
2522
2523 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2524 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2525 stw r0,mpPte(r4) ; Save the invalidated PTE pointer
2526
2527 ld r4,mpAlias(r4) ; Chain on to the next
2528 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2529 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2530 b hcmNextMap64 ; Check it out...
2531hcmNoMap64:
2532 bdnz hcmNextPhys64
2533 b hcmNextRegion
2534
2535
2536 .align 5
2537hcmDone:
2538 mtlr r10 ; Restore the return
2539 mtcr r9 ; Restore the condition register
2540 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2541hcmDone32:
2542 mtmsr r11 ; Restore translation/mode/etc.
2543 isync
2544 blr ; Leave...
2545
2546hcmDone64:
2547 mtmsrd r11 ; Restore translation/mode/etc.
2548 isync
2549 blr ; Leave...
2550
2551
1c79356b
A
2552
2553/*
91447636 2554 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
55e303ae
A
2555 * walks all mapping for a physical page and performs
2556 * specified operations on each.
1c79356b 2557 *
55e303ae
A
2558 * pp is unlocked physent
2559 * preop is operation to perform on physent before walk. This would be
2560 * used to set cache attribute or protection
2561 * op is the operation to perform on each mapping during walk
2562 * postop is operation to perform on the physent after walk. This would be
2563 * used to set or reset the RC bits.
91447636
A
2564 * opmod modifies the action taken on any connected PTEs visited during
2565 * the mapping walk.
55e303ae
A
2566 *
2567 * We return the RC bits from before postop is run.
2568 *
2569 * Note that this is designed to be called from 32-bit mode with a stack.
1c79356b 2570 *
55e303ae
A
2571 * We disable translation and all interruptions here. This keeps us
2572 * from having to worry about a deadlock due to having anything locked
2573 * and needing it to process a fault.
d7e50217 2574 *
55e303ae
A
2575 * We lock the physent, execute preop, and then walk each mapping in turn.
2576 * If there is a PTE, it is invalidated and the RC merged into the physent.
2577 * Then we call the op function.
2578 * Then we revalidate the PTE.
2579 * Once all mappings are finished, we save the physent RC and call the
2580 * postop routine. Then we unlock the physent and return the RC.
2581 *
2582 *
1c79356b
A
2583 */
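/*
 * Roughly, in C (names are illustrative placeholders; CONT stands for the
 * "keep going" result, i.e. the called function left CR0_EQ set):
 *
 *     mapPhysLock(pp);
 *     if (preop(pp) != CONT) goto done;                 // preop may end the walk
 *     for (mp = first_mapping_on_chain(pp); mp != NULL; mp = next_on_chain(mp)) {
 *         if (opmod < hwpMergePTE)       pte = mapInvPte(mp);            // invalidate PTE, merge RC
 *         else if (opmod == hwpMergePTE) { pte = NULL; mapMergeRC(mp); } // merge RC only
 *         else                           pte = NULL;                     // leave any PTE alone
 *         rc = op(mp);                                  // per-mapping operation
 *         if (pte != NULL) revalidate_pte(pte);         // put the PTE back, unlock PTEG
 *         if (rc != CONT) goto done;                    // op may end the walk early
 *     }
 *     old_rc = pp->ppLink;                              // RC bits before postop
 *     postop(pp);
 *     mapPhysUnlock(pp);
 *     return old_rc;
 * done:                                                 // early exit: skip postop
 *     old_rc = pp->ppLink;
 *     mapPhysUnlock(pp);
 *     return old_rc;
 */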
2584
1c79356b 2585 .align 5
55e303ae
A
2586 .globl EXT(hw_walk_phys)
2587
2588LEXT(hw_walk_phys)
91447636 2589 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
55e303ae 2590 mflr r0 ; Save the link register
91447636
A
2591 stw r24,FM_ARG0+0x00(r1) ; Save a register
2592 stw r25,FM_ARG0+0x04(r1) ; Save a register
2593 stw r26,FM_ARG0+0x08(r1) ; Save a register
2594 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2595 mr r24,r8 ; Save the parm
55e303ae 2596 mr r25,r7 ; Save the parm
91447636
A
2597 stw r28,FM_ARG0+0x10(r1) ; Save a register
2598 stw r29,FM_ARG0+0x14(r1) ; Save a register
2599 stw r30,FM_ARG0+0x18(r1) ; Save a register
2600 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2601 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
55e303ae
A
2602
2603 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
91447636
A
2604
2605 mfsprg r26,0 ; (INSTRUMENTATION)
2606 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2607 addi r27,r27,1 ; (INSTRUMENTATION)
2608 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2609 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2610 slwi r12,r24,2 ; (INSTRUMENTATION)
2611 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2612 addi r27,r27,1 ; (INSTRUMENTATION)
2613 stwx r27,r26,r12 ; (INSTRUMENTATION)
55e303ae
A
2614
2615 mr r26,r11 ; Save the old MSR
2616 lis r27,hi16(hwpOpBase) ; Get high order of op base
2617 slwi r4,r4,7 ; Convert preop to displacement
2618 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2619 slwi r5,r5,7 ; Convert op to displacement
2620 add r12,r4,r27 ; Point to the preop routine
2621 slwi r28,r6,7 ; Convert postop to displacement
2622 mtctr r12 ; Set preop routine
2623 add r28,r28,r27 ; Get the address of the postop routine
2624 add r27,r5,r27 ; Get the address of the op routine
1c79356b 2625
55e303ae 2626 bl mapPhysLock ; Lock the physent
1c79356b 2627
55e303ae
A
2628 mr r29,r3 ; Save the physent address
2629
2630 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2631
2632 bctrl ; Call preop routine
2633 bne- hwpEarly32 ; preop says to bail now...
91447636
A
2634
2635 cmplwi r24,hwpMergePTE ; Classify operation modifier
55e303ae
A
2636 mtctr r27 ; Set up the op function address
2637 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
91447636
A
2638 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2639 beq hwpMSrc32 ; Do TLB merge for each mapping
2640
3a60a9f5 2641hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
91447636 2642 beq hwpNone32 ; Did not find one...
55e303ae 2643
91447636
A
2644 bctrl ; Call the op function
2645
2646 bne- hwpEarly32 ; op says to bail now...
2647 lwz r31,mpAlias+4(r31) ; Chain on to the next
2648 b hwpQSrc32 ; Check it out...
2649
2650 .align 5
3a60a9f5 2651hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
55e303ae 2652 beq hwpNone32 ; Did not find one...
91447636
A
2653
2654 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2655 bctrl ; Call the op function
2656
2657 bne- hwpEarly32 ; op says to bail now...
2658 lwz r31,mpAlias+4(r31) ; Chain on to the next
2659 b hwpMSrc32 ; Check it out...
d7e50217 2660
91447636
A
2661 .align 5
2662hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2663 beq hwpNone32 ; Did not find one...
2664
55e303ae
A
2665;
2666; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2667; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2668; If there is no PTE, PTE low is obtained from mapping
2669;
2670 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2671
2672 bctrl ; Call the op function
2673
2674 crmove cr1_eq,cr0_eq ; Save the return code
2675
2676 mr. r3,r3 ; Was there a previously valid PTE?
2677 beq- hwpNxt32 ; Nope...
1c79356b 2678
55e303ae
A
2679 stw r5,4(r3) ; Store second half of PTE
2680 eieio ; Make sure we do not reorder
2681 stw r4,0(r3) ; Revalidate the PTE
2682
2683 eieio ; Make sure all updates come first
2684 stw r6,0(r7) ; Unlock the PCA
d7e50217 2685
55e303ae
A
2686hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2687 lwz r31,mpAlias+4(r31) ; Chain on to the next
2688 b hwpSrc32 ; Check it out...
1c79356b 2689
55e303ae 2690 .align 5
1c79356b 2691
55e303ae 2692hwpNone32: mtctr r28 ; Get the post routine address
1c79356b 2693
55e303ae
A
2694 lwz r30,ppLink+4(r29) ; Save the old RC
2695 mr r3,r29 ; Get the physent address
2696 bctrl ; Call post routine
1c79356b 2697
55e303ae
A
2698 bl mapPhysUnlock ; Unlock the physent
2699
2700 mtmsr r26 ; Restore translation/mode/etc.
2701 isync
1c79356b 2702
55e303ae 2703 b hwpReturn ; Go restore registers and return...
1c79356b 2704
55e303ae 2705 .align 5
1c79356b 2706
55e303ae
A
2707hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2708 mr r3,r29 ; Get the physent address
2709 bl mapPhysUnlock ; Unlock the physent
2710
2711 mtmsr r26 ; Restore translation/mode/etc.
2712 isync
2713
2714 b hwpReturn ; Go restore registers and return...
1c79356b 2715
55e303ae 2716 .align 5
1c79356b 2717
55e303ae
A
2718hwp64: bctrl ; Call preop routine
2719 bne-- hwpEarly64 ; preop says to bail now...
d7e50217 2720
91447636 2721 cmplwi r24,hwpMergePTE ; Classify operation modifier
55e303ae
A
2722 mtctr r27 ; Set up the op function address
2723
91447636 2724 li r24,ppLFAmask
55e303ae 2725 ld r31,ppLink(r3) ; Get the pointer to the first mapping
91447636
A
2726 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2727 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2728 beq hwpMSrc64 ; Do TLB merge for each mapping
55e303ae 2729
91447636
A
2730hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2731 beq hwpNone64 ; Did not find one...
2732
2733 bctrl ; Call the op function
2734
2735 bne-- hwpEarly64 ; op says to bail now...
2736 ld r31,mpAlias(r31) ; Chain on to the next
2737 b hwpQSrc64 ; Check it out...
2738
2739 .align 5
2740hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2741 beq hwpNone64 ; Did not find one...
2742
2743 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2744 bctrl ; Call the op function
2745
2746 bne-- hwpEarly64 ; op says to bail now...
2747 ld r31,mpAlias(r31) ; Chain on to the next
2748 b hwpMSrc64 ; Check it out...
2749
2750 .align 5
2751hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
55e303ae
A
2752 beq hwpNone64 ; Did not find one...
2753;
2754; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2755; PTE low in R5. PTEG comes back locked if there is one
2756;
2757 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
1c79356b 2758
55e303ae 2759 bctrl ; Call the op function
1c79356b 2760
55e303ae 2761 crmove cr1_eq,cr0_eq ; Save the return code
1c79356b 2762
55e303ae
A
2763 mr. r3,r3 ; Was there a previously valid PTE?
2764 beq-- hwpNxt64 ; Nope...
2765
2766 std r5,8(r3) ; Save bottom of PTE
2767 eieio ; Make sure we do not reorder
2768 std r4,0(r3) ; Revalidate the PTE
d7e50217 2769
55e303ae
A
2770 eieio ; Make sure all updates come first
2771 stw r6,0(r7) ; Unlock the PCA
2772
2773hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2774 ld r31,mpAlias(r31) ; Chain on to the next
55e303ae 2775 b hwpSrc64 ; Check it out...
1c79356b 2776
55e303ae
A
2777 .align 5
2778
2779hwpNone64: mtctr r28 ; Get the post routine address
2780
2781 lwz r30,ppLink+4(r29) ; Save the old RC
2782 mr r3,r29 ; Get the physent address
2783 bctrl ; Call post routine
2784
2785 bl mapPhysUnlock ; Unlock the physent
2786
2787 mtmsrd r26 ; Restore translation/mode/etc.
1c79356b 2788 isync
55e303ae
A
2789 b hwpReturn ; Go restore registers and return...
2790
2791 .align 5
1c79356b 2792
55e303ae
A
2793hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2794 mr r3,r29 ; Get the physent address
2795 bl mapPhysUnlock ; Unlock the physent
2796
2797 mtmsrd r26 ; Restore translation/mode/etc.
2798 isync
2799
91447636
A
2800hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2801 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2802 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2803 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
55e303ae 2804 mr r3,r30 ; Pass back the RC
91447636
A
2805 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2806 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
55e303ae 2807 mtlr r0 ; Restore the return
91447636
A
2808 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2809 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2810 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
55e303ae
A
2811 lwz r1,0(r1) ; Pop the stack
2812 blr ; Leave...
d7e50217 2813
d7e50217 2814
55e303ae
A
2815;
2816; The preop/op/postop function table.
2817; Each function must start on a 128-byte boundary and be no more than
2818; 32 instructions. If more than 32, we must fix the address calculations
2819; at the start of hwpOpBase
2820;
2821; The routine must set CR0_EQ in order to continue scan.
2822; If CR0_EQ is not set, an early return from the function is made.
2823;
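;
; In C terms, the dispatch performed by hw_walk_phys amounts to the following
; (purely illustrative; each table slot is 128 bytes, hence index << 7):
;
;     preop_fn  = (hwp_fn_t)(hwpOpBase + (preop  << 7));
;     op_fn     = (hwp_fn_t)(hwpOpBase + (op     << 7));
;     postop_fn = (hwp_fn_t)(hwpOpBase + (postop << 7));
;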
d7e50217 2824
55e303ae
A
2825 .align 7
2826
2827hwpOpBase:
2828
2829; Function 0 - No operation
2830
2831hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2832 blr ; Just return...
1c79356b
A
2833
2834 .align 5
1c79356b 2835
55e303ae 2836; This is the continuation of function 4 - Set attributes in mapping
1c79356b 2837
55e303ae
A
2838; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2839; NOTE: Do we have to deal with i-cache here?
2840
91447636 2841hwpSAM: li r11,4096 ; Get page size
d7e50217 2842
55e303ae
A
2843hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2844 dcbf r11,r5 ; Flush the line in the data cache
2845 bgt++ hwpSAMinvd ; Go do the rest of it...
2846
2847 sync ; Make sure it is done
1c79356b 2848
91447636 2849 li r11,4096 ; Get page size
55e303ae
A
2850
2851hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2852 icbi r11,r5 ; Flush the line in the icache
2853 bgt++ hwpSAMinvi ; Go do the rest of it...
2854
2855 sync ; Make sure it is done
1c79356b 2856
55e303ae
A
2857 cmpw r0,r0 ; Make sure we return CR0_EQ
2858 blr ; Return...
1c79356b 2859
1c79356b 2860
91447636 2861; Function 1 - Set protection in physent (obsolete)
1c79356b 2862
55e303ae
A
2863 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2864
91447636 2865hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
55e303ae 2866 blr ; Return...
1c79356b 2867
1c79356b 2868
55e303ae 2869; Function 2 - Set protection in mapping
1c79356b 2870
55e303ae 2871 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
1c79356b 2872
55e303ae
A
2873hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2874 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2875 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
91447636 2876 li r0,lo16(mpN|mpPP) ; Get no-execute and protection bits
55e303ae 2877 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
91447636 2878 rlwinm r2,r25,0,mpNb-32,mpPPe-32 ; Isolate new no-execute and protection bits
55e303ae 2879 beqlr-- ; Leave if permanent mapping (before we trash R5)...
91447636
A
2880 andc r5,r5,r0 ; Clear the old no-execute and prot bits
2881 or r5,r5,r2 ; Move in the new no-execute and prot bits
55e303ae
A
2882 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2883 cmpw r0,r0 ; Make sure we return CR0_EQ
2884 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2885 blr ; Leave...
2886
2887; Function 3 - Set attributes in physent
1c79356b 2888
55e303ae 2889 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
1c79356b 2890
91447636 2891hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
1c79356b 2892
55e303ae 2893hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
91447636 2894 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
55e303ae
A
2895 stwcx. r4,r5,r29 ; Try to stuff it
2896 bne-- hwpSAtrPhX ; Try again...
2897; Note: CR0_EQ is set because of stwcx.
2898 blr ; Return...
de355530 2899
55e303ae 2900; Function 4 - Set attributes in mapping
d7e50217 2901
55e303ae
A
2902 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2903
2904hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2905 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
91447636 2906 li r2,mpM ; Force on coherent
55e303ae
A
2907 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2908 li r0,lo16(mpWIMG) ; Get wimg mask
2909 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
91447636
A
2910 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2911 ; Copy in the cache inhibited bit
55e303ae
A
2912 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2913 andc r5,r5,r0 ; Clear the old wimg
91447636
A
2914 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2915 ; Copy in the guarded bit
55e303ae
A
2916 mfsprg r9,2 ; Feature flags
2917 or r5,r5,r2 ; Move in the new wimg
2918 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2919 lwz r2,mpPAddr(r31) ; Get the physical address
2920 li r0,0xFFF ; Start a mask
2921 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2922 rlwinm r5,r0,0,1,0 ; Copy to top half
2923 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2924 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2925 and r5,r5,r2 ; Clean stuff in top 32 bits
2926 andc r2,r2,r0 ; Clean bottom too
2927 rlwimi r5,r2,0,0,31 ; Insert low 23 to make full physical address
2928 b hwpSAM ; Join common
1c79356b 2929
55e303ae
A
2930; NOTE: we moved the remainder of the code out of here because it
2931; did not fit in the 128 bytes allotted. It got stuck into the free space
2932; at the end of the no-op function.
2933
2934
2935
de355530 2936
55e303ae 2937; Function 5 - Clear reference in physent
1c79356b 2938
55e303ae 2939 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
1c79356b 2940
55e303ae 2941hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2942
55e303ae 2943hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
91447636 2944 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
55e303ae
A
2945 stwcx. r4,r5,r29 ; Try to stuff it
2946 bne-- hwpCRefPhX ; Try again...
2947; Note: CR0_EQ is set because of stwcx.
2948 blr ; Return...
1c79356b
A
2949
2950
55e303ae 2951; Function 6 - Clear reference in mapping
1c79356b 2952
55e303ae 2953 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
1c79356b 2954
55e303ae
A
2955hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2956 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2957 andc r5,r5,r0 ; Clear in PTE copy
2958 andc r8,r8,r0 ; and in the mapping
2959 cmpw r0,r0 ; Make sure we return CR0_EQ
2960 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2961 blr ; Return...
1c79356b 2962
de355530 2963
55e303ae 2964; Function 7 - Clear change in physent
1c79356b 2965
55e303ae 2966 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
1c79356b 2967
55e303ae 2968hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2969
55e303ae
A
2970hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2971 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2972 stwcx. r4,r5,r29 ; Try to stuff it
2973 bne-- hwpCCngPhX ; Try again...
2974; Note: CR0_EQ is set because of stwcx.
2975 blr ; Return...
1c79356b 2976
de355530 2977
55e303ae 2978; Function 8 - Clear change in mapping
1c79356b 2979
55e303ae
A
2980 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2981
2982hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2983 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2984 andc r5,r5,r0 ; Clear in PTE copy
2985 andc r8,r8,r0 ; and in the mapping
2986 cmpw r0,r0 ; Make sure we return CR0_EQ
2987 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2988 blr ; Return...
d7e50217 2989
de355530 2990
55e303ae 2991; Function 9 - Set reference in physent
d7e50217 2992
55e303ae 2993 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
d7e50217 2994
55e303ae
A
2995hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2996
2997hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
2998 ori r4,r4,lo16(ppR) ; Set the reference
2999 stwcx. r4,r5,r29 ; Try to stuff it
3000 bne-- hwpSRefPhX ; Try again...
3001; Note: CR0_EQ is set because of stwcx.
3002 blr ; Return...
d7e50217 3003
1c79356b 3004
55e303ae 3005; Function 10 - Set reference in mapping
d7e50217 3006
55e303ae
A
3007 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3008
3009hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
55e303ae
A
3010 ori r8,r8,lo16(mpR) ; Set reference in mapping
3011 cmpw r0,r0 ; Make sure we return CR0_EQ
3012 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3013 blr ; Return...
3014
3015; Function 11 - Set change in physent
1c79356b 3016
55e303ae 3017 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
1c79356b 3018
55e303ae 3019hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 3020
55e303ae
A
3021hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3022 ori r4,r4,lo16(ppC) ; Set the change bit
3023 stwcx. r4,r5,r29 ; Try to stuff it
3024 bne-- hwpSCngPhX ; Try again...
3025; Note: CR0_EQ is set because of stwcx.
3026 blr ; Return...
de355530 3027
55e303ae 3028; Function 12 - Set change in mapping
1c79356b 3029
55e303ae 3030 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
1c79356b 3031
55e303ae 3032hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
55e303ae
A
3033 ori r8,r8,lo16(mpC) ; Set change in mapping
3034 cmpw r0,r0 ; Make sure we return CR0_EQ
3035 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3036 blr ; Return...
1c79356b 3037
55e303ae 3038; Function 13 - Test reference in physent
1c79356b 3039
55e303ae
A
3040 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3041
3042hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3043 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3044 blr ; Return (CR0_EQ set to continue if reference is off)...
1c79356b 3045
1c79356b 3046
55e303ae 3047; Function 14 - Test reference in mapping
1c79356b 3048
55e303ae 3049 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
de355530 3050
55e303ae
A
3051hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3052 blr ; Return (CR0_EQ set to continue if reference is off)...
3053
91447636 3054
55e303ae 3055; Function 15 - Test change in physent
1c79356b 3056
55e303ae 3057 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
1c79356b 3058
55e303ae
A
3059hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3060 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
91447636 3061 blr ; Return (CR0_EQ set to continue if change is off)...
55e303ae
A
3062
3063
3064; Function 16 - Test change in mapping
3065
3066 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
d7e50217 3067
55e303ae 3068hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
91447636
A
3069 blr ; Return (CR0_EQ set to continue if change is off)...
3070
3071
3072; Function 17 - Test reference and change in physent
55e303ae
A
3073
3074 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3075
91447636
A
3076hwpTRefCngPhy:
3077 lwz r0,ppLink+4(r29) ; Get the flags from physent
3078 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3079 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3080 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3081 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3082
3083
3084; Function 18 - Test reference and change in mapping
3085
3086 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3087hwpTRefCngMap:
3088 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3089 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3090 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3091 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3092
3093
3094; Function 19 - Clear reference and change in physent
3095
3096 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3097hwpCRefCngPhy:
3098 li r5,ppLink+4 ; Get offset for flag part of physent
3099
3100hwpCRefCngPhX:
3101 lwarx r4,r5,r29 ; Get the old flags
3102 andc r4,r4,r25 ; Clear R and C as specified by mask
3103 stwcx. r4,r5,r29 ; Try to stuff it
3104 bne-- hwpCRefCngPhX ; Try again...
3105; Note: CR0_EQ is set because of stwcx.
3106 blr ; Return...
3107
3108
3109; Function 20 - Clear reference and change in mapping
3110
3111 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3112hwpCRefCngMap:
3113 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3114 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3115 andc r5,r5,r0 ; Clear in PTE copy
3116 andc r8,r8,r0 ; and in the mapping
3117 cmpw r0,r0 ; Make sure we return CR0_EQ
3118 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3119 blr ; Return...
3120
d7e50217 3121
91447636 3122 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
d7e50217 3123
de355530 3124;
91447636 3125; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
55e303ae
A
3126;
3127; Returns:
3128; mapRtOK - if all is ok
3129; mapRtBadLk - if mapping lock fails
3130; mapRtPerm - if mapping is permanent
3131; mapRtNotFnd - if mapping is not found
3132; mapRtBlock - if mapping is a block
de355530 3133;
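;
;			Illustration only (not assembled): a hedged C sketch of how a caller
;			might drive hw_protect over a range, stepping with the *nextva output.
;			The types and the stepping convention are assumptions for the sketch;
;			the real callers live in the pmap layer.
;
;			    extern unsigned int hw_protect(pmap_t pmap, addr64_t va,
;			                                   vm_prot_t prot, addr64_t *nextva);
;
;			    static void protect_range(pmap_t pmap, addr64_t va, addr64_t end, vm_prot_t prot)
;			    {
;			        addr64_t next;
;			        while (va < end) {
;			            unsigned int rc = hw_protect(pmap, va, prot, &next);
;			            if (rc == mapRtBadLk) break;     /* could not take the search lock          */
;			            /* mapRtNotFnd / mapRtPerm / mapRtBlock: nothing to change, keep walking    */
;			            va = next;                       /* next va returned by the routine above   */
;			        }
;			    }
;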
55e303ae
A
3134 .align 5
3135 .globl EXT(hw_protect)
d7e50217 3136
55e303ae
A
3137LEXT(hw_protect)
3138 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3139 mflr r0 ; Save the link register
3140 stw r24,FM_ARG0+0x00(r1) ; Save a register
3141 stw r25,FM_ARG0+0x04(r1) ; Save a register
3142 mr r25,r7 ; Remember address of next va
3143 stw r26,FM_ARG0+0x08(r1) ; Save a register
3144 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3145 stw r28,FM_ARG0+0x10(r1) ; Save a register
3146 mr r24,r6 ; Save the new protection flags
3147 stw r29,FM_ARG0+0x14(r1) ; Save a register
3148 stw r30,FM_ARG0+0x18(r1) ; Save a register
3149 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3150 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1c79356b 3151
91447636
A
3152#if DEBUG
3153 lwz r11,pmapFlags(r3) ; Get pmaps flags
3154 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3155 bne hpPanic ; Call not valid for guest shadow assist pmap
3156#endif
3157
55e303ae
A
3158 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3159 lwz r7,pmapvr+4(r3) ; Get the second part
d7e50217 3160
d7e50217 3161
55e303ae 3162 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
9bccf70c 3163
55e303ae
A
3164 mr r27,r11 ; Remember the old MSR
3165 mr r26,r12 ; Remember the feature bits
9bccf70c 3166
55e303ae 3167 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 3168
55e303ae
A
3169 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3170
3171 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
9bccf70c 3172
55e303ae
A
3173hpSF1: mr r29,r4 ; Save top half of vaddr
3174 mr r30,r5 ; Save the bottom half
3175
3176 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3177 bl sxlkShared ; Go get a shared lock on the mapping lists
3178 mr. r3,r3 ; Did we get the lock?
3179 bne-- hpBadLock ; Nope...
d7e50217 3180
55e303ae
A
3181 mr r3,r28 ; get the pmap address
3182 mr r4,r29 ; Get bits 0:31 to look for
3183 mr r5,r30 ; Get bits 32:64
de355530 3184
55e303ae 3185 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
d7e50217 3186
91447636
A
3187 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3188 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3189 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3190 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
55e303ae 3191 mr. r31,r3 ; Save the mapping if we found it
55e303ae
A
3192 mr r29,r4 ; Save next va high half
3193 mr r30,r5 ; Save next va low half
d7e50217 3194
55e303ae 3195 beq-- hpNotFound ; Not found...
de355530 3196
91447636 3197 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
d7e50217 3198
55e303ae
A
3199 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3200
3201 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3202
91447636 3203 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
55e303ae
A
3204 mr. r3,r3 ; Was there a previously valid PTE?
3205
3206 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3207
3208 beq-- hpNoOld32 ; Nope...
1c79356b 3209
55e303ae
A
3210 stw r5,4(r3) ; Store second half of PTE
3211 eieio ; Make sure we do not reorder
3212 stw r4,0(r3) ; Revalidate the PTE
3213
3214 eieio ; Make sure all updates come first
3215 stw r6,0(r7) ; Unlock PCA
3216
3217hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3218 bl sxlkUnlock ; Unlock the search list
de355530 3219
55e303ae
A
3220 li r3,mapRtOK ; Set normal return
3221 b hpR32 ; Join common...
3222
3223 .align 5
1c79356b 3224
d7e50217 3225
55e303ae
A
3226hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3227
91447636 3228 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
55e303ae
A
3229 mr. r3,r3 ; Was there a previously valid PTE?
3230
3231 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3232
3233 beq-- hpNoOld64 ; Nope...
d7e50217 3234
55e303ae
A
3235 std r5,8(r3) ; Store second half of PTE
3236 eieio ; Make sure we do not reorder
3237 std r4,0(r3) ; Revalidate the PTE
de355530 3238
55e303ae
A
3239 eieio ; Make sure all updates come first
3240 stw r6,0(r7) ; Unlock PCA
de355530 3241
55e303ae
A
3242hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3243 bl sxlkUnlock ; Unlock the search list
de355530 3244
55e303ae
A
3245 li r3,mapRtOK ; Set normal return
3246 b hpR64 ; Join common...
de355530 3247
55e303ae
A
3248 .align 5
3249
3250hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3251
3252hpR32: mtmsr r27 ; Restore enables/translation/etc.
3253 isync
3254 b hpReturnC ; Join common...
3255
3256hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3257 isync
3258
3259hpReturnC: stw r29,0(r25) ; Save the top of the next va
3260 stw r30,4(r25) ; Save the bottom of the next va
3261 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
3262 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3263 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3264 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3265 mtlr r0 ; Restore the return
3266 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3267 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3268 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3269 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3270 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3271 lwz r1,0(r1) ; Pop the stack
3272 blr ; Leave...
3273
3274 .align 5
3275
3276hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3277 b hpReturn ; Leave....
d7e50217 3278
55e303ae
A
3279hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3280 bl sxlkUnlock ; Unlock the search list
d7e50217 3281
55e303ae
A
3282 li r3,mapRtNotFnd ; Set that we did not find the requested page
3283 b hpReturn ; Leave....
3284
3285hpNotAllowed:
3286 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3287 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3288 bne-- hpNotFound ; Yeah...
3289 bl sxlkUnlock ; Unlock the search list
3290
3291 li r3,mapRtBlock ; Assume it was a block
91447636
A
3292 rlwinm r0,r7,0,mpType ; Isolate mapping type
3293 cmplwi r0,mpBlock ; Is this a block mapping?
3294 beq++ hpReturn ; Yes, leave...
55e303ae
A
3295
3296 li r3,mapRtPerm ; Set that we hit a permanent page
3297 b hpReturn ; Leave....
9bccf70c 3298
91447636
A
3299hpPanic: lis r0,hi16(Choke) ; System abend
3300 ori r0,r0,lo16(Choke) ; System abend
3301 li r3,failMapping ; Show that we failed some kind of mapping thing
3302 sc
3303
9bccf70c 3304
55e303ae
A
3305;
3306; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3307;
3308; Returns following code ORed with RC from mapping
3309; mapRtOK - if all is ok
3310; mapRtBadLk - if mapping lock fails
3311; mapRtNotFnd - if mapping is not found
3312;
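;
;			Illustration only (not assembled): a hedged C sketch of decoding
;			hw_test_rc's result. The status code and the mpR/mpC bits are ORed
;			into one word; exact bit positions come from the mapping flag
;			definitions, so treat the masks below as placeholders.
;
;			    extern unsigned int hw_test_rc(pmap_t pmap, addr64_t va, boolean_t reset);
;
;			    static void show_rc(pmap_t pmap, addr64_t va)
;			    {
;			        unsigned int rc = hw_test_rc(pmap, va, 1);   /* test and clear R and C        */
;			        if (rc == mapRtBadLk || rc == mapRtNotFnd)   /* no RC information returned    */
;			            return;
;			        int referenced = (rc & mpR) != 0;            /* R bit copied from the mapping */
;			        int changed    = (rc & mpC) != 0;            /* C bit copied from the mapping */
;			        (void)referenced; (void)changed;
;			    }
;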
3313 .align 5
3314 .globl EXT(hw_test_rc)
9bccf70c 3315
55e303ae
A
3316LEXT(hw_test_rc)
3317 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3318 mflr r0 ; Save the link register
3319 stw r24,FM_ARG0+0x00(r1) ; Save a register
3320 stw r25,FM_ARG0+0x04(r1) ; Save a register
3321 stw r26,FM_ARG0+0x08(r1) ; Save a register
3322 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3323 stw r28,FM_ARG0+0x10(r1) ; Save a register
3324 mr r24,r6 ; Save the reset request
3325 stw r29,FM_ARG0+0x14(r1) ; Save a register
3326 stw r30,FM_ARG0+0x18(r1) ; Save a register
3327 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3328 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
9bccf70c 3329
91447636
A
3330#if DEBUG
3331 lwz r11,pmapFlags(r3) ; Get pmaps flags
3332 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3333 bne htrPanic ; Call not valid for guest shadow assist pmap
3334#endif
3335
55e303ae
A
3336 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3337 lwz r7,pmapvr+4(r3) ; Get the second part
0b4e3aa0 3338
9bccf70c 3339
55e303ae 3340 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
9bccf70c 3341
55e303ae
A
3342 mr r27,r11 ; Remember the old MSR
3343 mr r26,r12 ; Remember the feature bits
9bccf70c 3344
55e303ae 3345 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 3346
55e303ae 3347 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
1c79356b 3348
55e303ae 3349 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
1c79356b 3350
55e303ae
A
3351htrSF1: mr r29,r4 ; Save top half of vaddr
3352 mr r30,r5 ; Save the bottom half
1c79356b 3353
55e303ae
A
3354 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3355 bl sxlkShared ; Go get a shared lock on the mapping lists
3356 mr. r3,r3 ; Did we get the lock?
3357 li r25,0 ; Clear RC
3358 bne-- htrBadLock ; Nope...
3359
3360 mr r3,r28 ; get the pmap address
3361 mr r4,r29 ; Get bits 0:31 to look for
3362 mr r5,r30 ; Get bits 32:64
d7e50217 3363
55e303ae 3364 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
9bccf70c 3365
91447636
A
3366 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3367 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3368 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3369 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
55e303ae 3370 mr. r31,r3 ; Save the mapping if we found it
91447636 3371 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
d7e50217 3372
91447636 3373 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
1c79356b 3374
55e303ae
A
3375 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3376
3377 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3378
3379 cmplwi cr1,r24,0 ; Do we want to clear RC?
3380 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3381 mr. r3,r3 ; Was there a previously valid PTE?
3382 li r0,lo16(mpR|mpC) ; Get bits to clear
9bccf70c 3383
55e303ae
A
3384 and r25,r5,r0 ; Save the RC bits
3385 beq++ cr1,htrNoClr32 ; Nope...
3386
3387 andc r12,r12,r0 ; Clear mapping copy of RC
3388 andc r5,r5,r0 ; Clear PTE copy of RC
3389 sth r12,mpVAddr+6(r31) ; Set the new RC
9bccf70c 3390
55e303ae 3391htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
d7e50217 3392
55e303ae
A
3393 sth r5,6(r3) ; Store updated RC
3394 eieio ; Make sure we do not reorder
3395 stw r4,0(r3) ; Revalidate the PTE
9bccf70c 3396
55e303ae
A
3397 eieio ; Make sure all updates come first
3398 stw r6,0(r7) ; Unlock PCA
1c79356b 3399
55e303ae
A
3400htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3401 bl sxlkUnlock ; Unlock the search list
3402 li r3,mapRtOK ; Set normal return
3403 b htrR32 ; Join common...
1c79356b 3404
55e303ae
A
3405 .align 5
3406
3407
3408htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3409
3410 cmplwi cr1,r24,0 ; Do we want to clear RC?
3411 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3412 mr. r3,r3 ; Was there a previously valid PTE?
3413 li r0,lo16(mpR|mpC) ; Get bits to clear
1c79356b 3414
55e303ae
A
3415 and r25,r5,r0 ; Save the RC bits
3416 beq++ cr1,htrNoClr64 ; Nope...
3417
3418 andc r12,r12,r0 ; Clear mapping copy of RC
3419 andc r5,r5,r0 ; Clear PTE copy of RC
3420 sth r12,mpVAddr+6(r31) ; Set the new RC
1c79356b 3421
55e303ae
A
3422htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3423
3424 sth r5,14(r3) ; Store updated RC
3425 eieio ; Make sure we do not reorder
3426 std r4,0(r3) ; Revalidate the PTE
1c79356b 3427
55e303ae
A
3428 eieio ; Make sure all updates come first
3429 stw r6,0(r7) ; Unlock PCA
1c79356b 3430
55e303ae
A
3431htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3432 bl sxlkUnlock ; Unlock the search list
3433 li r3,mapRtOK ; Set normal return
3434 b htrR64 ; Join common...
de355530 3435
55e303ae
A
3436 .align 5
3437
3438htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
de355530 3439
55e303ae
A
3440htrR32: mtmsr r27 ; Restore enables/translation/etc.
3441 isync
3442 b htrReturnC ; Join common...
de355530 3443
55e303ae
A
3444htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3445 isync
1c79356b 3446
55e303ae
A
3447htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
3448 or r3,r3,r25 ; Send the RC bits back
3449 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3450 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3451 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3452 mtlr r0 ; Restore the return
3453 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3454 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3455 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3456 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3457 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3458 lwz r1,0(r1) ; Pop the stack
1c79356b
A
3459 blr ; Leave...
3460
3461 .align 5
3462
55e303ae
A
3463htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3464 b htrReturn ; Leave....
1c79356b 3465
55e303ae
A
3466htrNotFound:
3467 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3468 bl sxlkUnlock ; Unlock the search list
1c79356b 3469
55e303ae
A
3470 li r3,mapRtNotFnd ; Set that we did not find the requested page
3471 b htrReturn ; Leave....
3472
91447636
A
3473htrPanic: lis r0,hi16(Choke) ; System abend
3474 ori r0,r0,lo16(Choke) ; System abend
3475 li r3,failMapping ; Show that we failed some kind of mapping thing
3476 sc
3477
3478
3479;
3480;
3481; mapFindLockPN - find and lock physent for a given page number
3482;
3483;
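;
;			Illustration only (not assembled): a hedged C sketch of the bank
;			table walk below. Field and type names approximate the real
;			pmap_mem_regions layout (each physent is 8 bytes; a zero mrPhysTab
;			pointer marks the end of the table).
;
;			    #include <stdint.h>
;
;			    struct phys_entry { uint32_t ppLink, ppFlags; };   /* 8 bytes, field names approximate */
;			    struct mem_region { struct phys_entry *mrPhysTab; uint32_t mrStart, mrEnd; };
;
;			    static struct phys_entry *find_physent(struct mem_region *mr, uint32_t ppnum)
;			    {
;			        for (; mr->mrPhysTab != 0; mr++) {                      /* mapFLPNitr            */
;			            if (ppnum >= mr->mrStart && ppnum <= mr->mrEnd)
;			                return &mr->mrPhysTab[ppnum - mr->mrStart];     /* then take mapPhysLock */
;			        }
;			        return 0;                                               /* mapFLPNmiss           */
;			    }
;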
3484 .align 5
3485mapFindLockPN:
3486 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3487 mr r2,r3 ; Save our target
3488 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3489
3490mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3491 lwz r5,mrStart(r9) ; Get start of table entry
3492 lwz r0,mrEnd(r9) ; Get end of table entry
3493 addi r9,r9,mrSize ; Point to the next slot
3a60a9f5 3494 cmplwi cr7,r3,0 ; Are we at the end of the table?
91447636
A
3495 cmplw r2,r5 ; See if we are in this table
3496 cmplw cr1,r2,r0 ; Check end also
3497 sub r4,r2,r5 ; Calculate index to physical entry
3a60a9f5 3498 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
91447636
A
3499 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3500 slwi r4,r4,3 ; Get offset to physical entry
3501
3502 blt-- mapFLPNitr ; Did not find it...
3503
3504 add r3,r3,r4 ; Point right to the slot
3505 b mapPhysLock ; Join common lock code
3506
3507mapFLPNmiss:
3508 li r3,0 ; Show that we did not find it
3509 blr ; Leave...
3510
3511
3512;
55e303ae
A
3513; mapPhysFindLock - find physent list and lock it
3514; R31 points to mapping
3515;
3516 .align 5
3517
3518mapPhysFindLock:
3519 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3520 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
91447636 3521 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
55e303ae
A
3522 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3523 add r3,r3,r4 ; Point to table entry
3524 lwz r5,mpPAddr(r31) ; Get physical page number
3525 lwz r7,mrStart(r3) ; Get the start of range
3526 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3527 sub r6,r5,r7 ; Get index to physent
3528 rlwinm r6,r6,3,0,28 ; Get offset to physent
3529 add r3,r3,r6 ; Point right to the physent
3530 b mapPhysLock ; Join in the lock...
3531
3532;
3533; mapPhysLock - lock a physent list
3534; R3 contains list header
3535;
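;
;			Illustration only (not assembled): a hedged C sketch of the lock
;			protocol below, which uses bit 0 of ppLink as the lock bit and spins
;			with plain loads while the lock is held (the lwarx/stwcx. reservation
;			is only used for the actual acquire).
;
;			    #include <stdint.h>
;
;			    #define PP_LOCK_BIT 0x80000000u          /* bit 0 of ppLink (32-bit view) */
;
;			    static void phys_lock(volatile uint32_t *pplink)
;			    {
;			        for (;;) {
;			            uint32_t old = __atomic_load_n(pplink, __ATOMIC_RELAXED);
;			            if (old & PP_LOCK_BIT)
;			                continue;                    /* mapPhysLockT: spin until it looks free      */
;			            if (__atomic_compare_exchange_n(pplink, &old, old | PP_LOCK_BIT,
;			                                            0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
;			                return;                      /* got it; the isync below plays the acquire role */
;			        }
;			    }
;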
3536 .align 5
3537
3538mapPhysLockS:
3539 li r2,lgKillResv ; Get a spot to kill reservation
3540 stwcx. r2,0,r2 ; Kill it...
3541
3542mapPhysLockT:
3543 lwz r2,ppLink(r3) ; Get physent chain header
3544 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3545 bne-- mapPhysLockT ; Nope, still locked...
3546
3547mapPhysLock:
3548 lwarx r2,0,r3 ; Get the lock
3549 rlwinm. r0,r2,0,0,0 ; Is it locked?
3550 oris r0,r2,0x8000 ; Set the lock bit
3551 bne-- mapPhysLockS ; It is locked, spin on it...
3552 stwcx. r0,0,r3 ; Try to stuff it back...
3553 bne-- mapPhysLock ; Collision, try again...
3554 isync ; Clear any speculations
3555 blr ; Leave...
3556
3557
3558;
3559; mapPhysUnlock - unlock a physent list
3560; R3 contains list header
3561;
3562 .align 5
3563
3564mapPhysUnlock:
3565 lwz r0,ppLink(r3) ; Get physent chain header
3566 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3567 eieio ; Make sure unlock comes last
3568 stw r0,ppLink(r3) ; Unlock the list
3569 blr
3570
3571;
3572; mapPhysMerge - merge the RC bits into the master copy
3573; R3 points to the physent
3574; R4 contains the RC bits
3575;
3576; Note: we just return if RC is 0
3577;
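;
;			Illustration only (not assembled): a hedged C sketch of the merge,
;			an atomic OR of the mapping's R/C bits into the low word of ppLink
;			(skipped entirely when there is nothing to merge).
;
;			    #include <stdint.h>
;
;			    static void phys_merge_rc(volatile uint32_t *pplink_lo, uint32_t rc_bits)
;			    {
;			        if (rc_bits == 0)
;			            return;                          /* nothing to merge, like the beqlr-- below */
;			        __atomic_fetch_or(pplink_lo, rc_bits, __ATOMIC_RELAXED);
;			    }
;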
3578 .align 5
3579
3580mapPhysMerge:
3581 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3582 la r5,ppLink+4(r3) ; Point to the RC field
3583 beqlr-- ; Leave if RC is 0...
3584
3585mapPhysMergeT:
3586 lwarx r6,0,r5 ; Get the RC part
3587 or r6,r6,r4 ; Merge in the RC
3588 stwcx. r6,0,r5 ; Try to stuff it back...
3589 bne-- mapPhysMergeT ; Collision, try again...
3590 blr ; Leave...
3591
3592;
3593; Sets the physent link pointer and preserves all flags
3594; The list is locked
3595; R3 points to physent
3596; R4 has link to set
3597;
3598
3599 .align 5
3600
3601mapPhyCSet32:
3602 la r5,ppLink+4(r3) ; Point to the link word
3603
3604mapPhyCSetR:
3605 lwarx r2,0,r5 ; Get the link and flags
91447636 3606 rlwimi r4,r2,0,ppFlags ; Insert the flags
55e303ae
A
3607 stwcx. r4,0,r5 ; Stick them back
3608 bne-- mapPhyCSetR ; Someone else did something, try again...
3609 blr ; Return...
3610
3611 .align 5
3612
3613mapPhyCSet64:
91447636
A
3614 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3615 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
3616
3617mapPhyCSet64x:
3618 ldarx r2,0,r3 ; Get the link and flags
3619 and r5,r2,r0 ; Isolate the flags
3620 or r6,r4,r5 ; Add them to the link
3621 stdcx. r6,0,r3 ; Stick them back
3622 bne-- mapPhyCSet64x ; Someone else did something, try again...
3623 blr ; Return...
3624
3625;
3626; mapBumpBusy - increment the busy count on a mapping
3627; R3 points to mapping
3628;
3629
3630 .align 5
3631
3632mapBumpBusy:
3633 lwarx r4,0,r3 ; Get mpBusy
3634 addis r4,r4,0x0100 ; Bump the busy count
3635 stwcx. r4,0,r3 ; Save it back
3636 bne-- mapBumpBusy ; This did not work, try again...
3637 blr ; Leave...
3638
3639;
3640; mapDropBusy - decrement the busy count on a mapping
3641; R3 points to mapping
3642;
3643
3644 .globl EXT(mapping_drop_busy)
3645 .align 5
3646
3647LEXT(mapping_drop_busy)
3648mapDropBusy:
3649 lwarx r4,0,r3 ; Get mpBusy
3650 addis r4,r4,0xFF00 ; Drop the busy count
3651 stwcx. r4,0,r3 ; Save it back
3652 bne-- mapDropBusy ; This did not work, try again...
3653 blr ; Leave...
3654
3655;
3656; mapDrainBusy - drain the busy count on a mapping
3657; R3 points to mapping
3658; Note: we already have a busy for ourselves. Only one
3659; busy per processor is allowed, so we just spin here
3660; waiting for the count to drop to 1.
3661; Also, the mapping can not be on any lists when we do this
3662; so all we are doing is waiting until it can be released.
3663;
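;
;			Illustration only (not assembled): a hedged C sketch of the busy
;			count protocol. The count lives in the top byte of mpFlags; bumping
;			adds 0x01000000 and dropping adds 0xFF000000 (i.e. subtracts one,
;			which is why mapDropBusy uses addis of 0xFF00).
;
;			    #include <stdint.h>
;
;			    static void map_bump_busy(volatile uint32_t *mpflags)
;			    {
;			        __atomic_fetch_add(mpflags, 0x01000000u, __ATOMIC_RELAXED);
;			    }
;
;			    static void map_drop_busy(volatile uint32_t *mpflags)
;			    {
;			        __atomic_fetch_sub(mpflags, 0x01000000u, __ATOMIC_RELAXED);
;			    }
;
;			    static void map_drain_busy(volatile uint32_t *mpflags)
;			    {
;			        /* we already hold one busy; wait for everyone else to drop theirs */
;			        while ((__atomic_load_n(mpflags, __ATOMIC_RELAXED) >> 24) != 1)
;			            ;
;			    }
;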
3664
3665 .align 5
3666
3667mapDrainBusy:
3668 lwz r4,mpFlags(r3) ; Get mpBusy
3669 rlwinm r4,r4,8,24,31 ; Clean it up
3670 cmplwi r4,1 ; Is it just our busy?
3671 beqlr++ ; Yeah, it is clear...
3672 b mapDrainBusy ; Try again...
3673
3674
3675
3676;
3677; handleDSeg - handle a data segment fault
3678; handleISeg - handle an instruction segment fault
3679;
3680; All that we do here is to map these to DSI or ISI and ensure
3681; that the hash bit is not set. This forces the fault code
3682; to also handle the missing segment.
3683;
3684; At entry R2 contains per_proc, R13 contains savearea pointer,
3685; and R11 is the exception code.
3686;
3687
3688 .align 5
3689 .globl EXT(handleDSeg)
3690
3691LEXT(handleDSeg)
3692
3693 li r11,T_DATA_ACCESS ; Change fault to DSI
3694 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3695 b EXT(handlePF) ; Join common...
3696
3697 .align 5
3698 .globl EXT(handleISeg)
3699
3700LEXT(handleISeg)
3701
3702 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3703 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3704 b EXT(handlePF) ; Join common...
3705
3706
3707/*
3708 * handlePF - handle a page fault interruption
3709 *
3710 * At entry R2 contains per_proc, R13 contains savearea pointer,
3711 * and R11 is the exception code.
3712 *
3713 * This first part does a quick check to see if we can handle the fault.
3714 * We cannot handle any kind of protection exceptions here, so we pass
3715 * them up to the next level.
3716 *
3717 * NOTE: In order for a page-fault redrive to work, the translation miss
3718 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3719 * before we come here.
3720 */
3721
3722 .align 5
3723 .globl EXT(handlePF)
3724
3725LEXT(handlePF)
3726
3727 mfsprg r12,2 ; Get feature flags
3728 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3729 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3730 mtcrf 0x02,r12 ; move pf64Bit to cr6
3731 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3732 lwz r18,SAVflags(r13) ; Get the flags
3733
3734 beq-- gotIfetch ; We have an IFETCH here...
3735
3736 lwz r27,savedsisr(r13) ; Get the DSISR
3737 lwz r29,savedar(r13) ; Get the first half of the DAR
3738 lwz r30,savedar+4(r13) ; And second half
3739
3740 b ckIfProt ; Go check if this is a protection fault...
3741
3742gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3743 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3744 lwz r30,savesrr0+4(r13) ; And second half
3745 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3746
3747ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3748 li r20,64 ; Set a limit of 64 nests for sanity check
3749 bne-- hpfExit ; Yes... (probably not though)
91447636 3750
55e303ae
A
3751;
3752; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3753; should be loading the user pmap here.
3754;
3755
3756 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3757 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3758 mr r19,r2 ; Remember the per_proc
3759 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3760 mr r23,r30 ; Save the low part of faulting address
3761 beq-- hpfInKern ; Skip if we are in the kernel
3762 la r8,ppUserPmap(r19) ; Point to the current user pmap
3763
3764hpfInKern: mr r22,r29 ; Save the high part of faulting address
3765
3766 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3767
3768;
3769; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3770; predefined value that corresponds to no address space. When we see that value
3771; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3772; cause the proper SR to be loaded.
3773;
3774
3775 lwz r28,4(r8) ; Pick up the pmap
3776 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3777 mr r25,r28 ; Save the original pmap (in case we nest)
91447636
A
3778 lwz r0,pmapFlags(r28) ; Get pmap's flags
3779 bne hpfGVtest ; Segs are not ours if so...
55e303ae
A
3780 mfsrin r4,r30 ; Get the SR that was used for translation
3781 cmplwi r4,invalSpace ; Is this a simulated segment fault?
91447636 3782 bne++ hpfGVtest ; No...
55e303ae
A
3783
3784 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
91447636 3785 b hpfGVtest ; Join on up...
55e303ae
A
3786
3787 .align 5
3788
3789 nop ; Push hpfNest to a 32-byte boundary
3790 nop ; Push hpfNest to a 32-byte boundary
3791 nop ; Push hpfNest to a 32-byte boundary
55e303ae
A
3792
3793hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3794 mr r25,r28 ; Save the original pmap (in case we nest)
91447636
A
3795 lwz r0,pmapFlags(r28) ; Get pmap's flags
3796
3797hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3798 bne hpfGVxlate ; Yup, do accelerated shadow stuff
55e303ae
A
3799
3800;
3801; This is where we loop descending nested pmaps
3802;
3803
3804hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3805 addi r20,r20,-1 ; Count nest try
3806 bl sxlkShared ; Go get a shared lock on the mapping lists
3807 mr. r3,r3 ; Did we get the lock?
3808 bne-- hpfBadLock ; Nope...
3809
3810 mr r3,r28 ; Get the pmap pointer
3811 mr r4,r22 ; Get top of faulting vaddr
3812 mr r5,r23 ; Get bottom of faulting vaddr
3813 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3814
3815 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3816 mr. r31,r3 ; Save the mapping if we found it
3817 cmplwi cr1,r0,0 ; Check for removal
3818 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3819
3820 bt-- cr0_eq,hpfNotFound ; Not found or removing...
91447636
A
3821
3822 rlwinm r0,r7,0,mpType ; Isolate mapping type
3823 cmplwi r0,mpNest ; Are we again nested?
3824 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3825 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
55e303ae
A
3826 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3827
3828 lhz r21,mpSpace(r31) ; Get the space
3829
91447636 3830 bne++ hpfFoundIt ; No, we found our guy...
55e303ae
A
3831
3832
3833#if pmapTransSize != 12
3834#error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3835#endif
91447636 3836 cmplwi r0,mpLinkage ; Linkage mapping?
55e303ae 3837 cmplwi cr1,r20,0 ; Too many nestings?
91447636 3838 beq-- hpfSpclNest ; Do we need to do special handling?
55e303ae
A
3839
3840hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3841 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3842 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3843 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3844 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3845 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3846 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3847 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3848 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3849 slwi r11,r21,3 ; Multiply space by 8
3850 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3851 addc r23,r23,r9 ; Relocate bottom half of vaddr
3852 lwz r10,0(r10) ; Get the actual translation map
3853 slwi r12,r21,2 ; Multiply space by 4
3854 add r10,r10,r11 ; Add in the higher part of the index
3855 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3856 adde r22,r22,r8 ; Relocate the top half of the vaddr
3857 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3858 bl sxlkUnlock ; Unlock the search list
3859
91447636 3860 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
55e303ae 3861 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
91447636
A
3862 cmplwi r28,0 ; Is the pmap paddr valid?
3863 bne+ hpfNest ; Nest into new pmap...
3864 b hpfBadPmap ; Handle bad pmap
55e303ae 3865
91447636 3866hpfGetPmap64:
55e303ae 3867 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
91447636
A
3868 cmpldi r28,0 ; Is the pmap paddr valid?
3869 bne++ hpfNest ; Nest into new pmap...
3870 b hpfBadPmap ; Handle bad pmap
3871
55e303ae
A
3872
3873;
3874; Error condition. We only allow 64 nestings. This keeps us from having to
3875; check for recursive nests when we install them.
3876;
3877
3878 .align 5
3879
3880hpfNestTooMuch:
3881 lwz r20,savedsisr(r13) ; Get the DSISR
3882 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3883 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3884 ori r20,r20,1 ; Indicate that there was a nesting problem
3885 stw r20,savedsisr(r13) ; Stash it
3886 lwz r11,saveexception(r13) ; Restore the exception code
3887 b EXT(PFSExit) ; Yes... (probably not though)
3888
3889;
3890; Error condition - lock failed - this is fatal
3891;
3892
3893 .align 5
3894
3895hpfBadLock:
3896 lis r0,hi16(Choke) ; System abend
3897 ori r0,r0,lo16(Choke) ; System abend
3898 li r3,failMapping ; Show mapping failure
3899 sc
91447636
A
3900
3901;
3902; Error condition - space id selected an invalid pmap - fatal
3903;
3904
3905 .align 5
3906
3907hpfBadPmap:
3908 lis r0,hi16(Choke) ; System abend
3909 ori r0,r0,lo16(Choke) ; System abend
3910 li r3,failPmap ; Show invalid pmap
3911 sc
3912
55e303ae
A
3913;
3914; Did not find any kind of mapping
3915;
3916
3917 .align 5
3918
3919hpfNotFound:
3920 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3921 bl sxlkUnlock ; Unlock it
3922 lwz r11,saveexception(r13) ; Restore the exception code
3923
3924hpfExit: ; We need this because we can not do a relative branch
3925 b EXT(PFSExit) ; Yes... (probably not though)
3926
3927
3928;
3929; Here is where we handle special mappings. So far, the only use is to load a
3930; processor specific segment register for copy in/out handling.
3931;
3932; The only (so far implemented) special map is used for copyin/copyout.
3933; We keep a "linkage" mapping in the per_proc.
3934; The linkage mapping is basically a nested pmap that is switched in
3935; as part of a context switch. It relocates the appropriate user address
3936; space slice into the right place in the kernel.
3937;
3938
3939 .align 5
3940
3941hpfSpclNest:
91447636
A
3942 la r31,ppUMWmp(r19) ; Just point to the mapping
3943 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
55e303ae
A
3944 b hpfCSrch ; Go continue search...
3945
3946
3947;
3948; We have now found a mapping for the address we faulted on.
3949;
3950
3951;
3952; Here we go about calculating what the VSID should be. We concatenate
3953; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3954; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3955; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3956; the VSID.
3957;
3958; This is used both for segment handling and PTE handling
3959;
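;
;			Illustration only (not assembled): a hedged C sketch of the recipe
;			just described. The real code below places the fields at slightly
;			different bit positions (see the diagrams at the top of this file)
;			and also folds the protection key into the cached form.
;
;			    #include <stdint.h>
;
;			    static uint64_t make_vsid(uint64_t space, uint64_t ea)
;			    {
;			        uint64_t sid  = space & 0x3FFFull;                 /* 14-bit space ID               */
;			        uint64_t sid3 = (sid << 28) | (sid << 14) | sid;   /* three concatenated copies     */
;			        uint64_t seg  = ea >> 28;                          /* segment number (EA bits 0:35) */
;			        return sid3 ^ (seg << 14);                         /* expanded SID XOR shifted EA   */
;			    }
;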
3960
3961
3962#if maxAdrSpb != 14
3963#error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3964#endif
3965
91447636
A
3966; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3967; when a multi-level mapping has been successfully searched):
3968; r21: home space id number
3969; r22: relocated high-order 32 bits of vaddr
3970; r23: relocated low-order 32 bits of vaddr
3971; r25: pmap physical address
3972; r27: dsisr
3973; r28: home pmap physical address
3974; r29: high-order 32 bits of faulting vaddr
3975; r30: low-order 32 bits of faulting vaddr
3976; r31: mapping's physical address
3977
55e303ae
A
3978 .align 5
3979
3980hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
91447636 3981hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
55e303ae
A
3982 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3983 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3984 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
91447636 3985 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
55e303ae
A
3986 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3987 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3988 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3989 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
3990 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3991 xor r14,r14,r20 ; Calculate the top half of VSID
3992 xor r15,r15,r21 ; Calculate the bottom half of the VSID
3993 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
3994 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
3995 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
3996 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
3997 or r12,r12,r15 ; Add key into the bottom of VSID
3998;
3999; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4000
4001 bne++ hpfPteMiss ; Nope, normal PTE miss...
4002
4003;
4004; Here is the only place that we make an entry in the pmap segment cache.
4005;
4006; Note that we do not make an entry in the segment cache for special
4007; nested mappings. This makes the copy in/out segment get refreshed
4008; when switching threads.
4009;
4010; The first thing that we do is to look up the ESID we are going to load
4011; into a segment in the pmap cache. If it is already there, this is
4012; a segment that appeared since the last time we switched address spaces.
4013; If all is correct, then it was another processor that made the cache
4014; entry. If not, well, it is an error that we should die on, but I have
4015; not figured out a good way to trap it yet.
4016;
4017; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4018; an entry based on the generation number, update the cache entry, and
4019; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4020; entries that correspond to the last 4 bits (32:35 for 64-bit and
4021; 0:3 for 32-bit) of the ESID.
4022;
4023; Then we unlock and bail.
4024;
4025; First lock it. Then select a free slot or steal one based on the generation
4026; number. Then store it, update the allocation flags, and unlock.
4027;
4028; The cache entry contains an image of the ESID/VSID pair we would load for
4029; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4030;
4031; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4032; the current one, which may have changed because we nested.
4033;
4034; Also remember that we do not store the valid bit in the ESID. If we
4035; do, this will break some other stuff.
4036;
4037
4038 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4039
4040 mr r3,r25 ; Point to the pmap
37839358
A
4041 mr r4,r29 ; ESID high half
4042 mr r5,r30 ; ESID low half
55e303ae
A
4043 bl pmapCacheLookup ; Go see if this is in the cache already
4044
4045 mr. r3,r3 ; Did we find it?
4046 mr r4,r11 ; Copy this to a different register
4047
4048 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4049
4050 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4051 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4052
4053 cntlzw r7,r4 ; Find a free slot
4054
4055 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4056 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4057 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4058 addi r5,r4,1 ; Bump the generation number
4059 and r7,r7,r6 ; Clear bit number if none empty
4060 andc r8,r4,r6 ; Clear generation count if we found an empty
4061 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4062 or r7,r7,r8 ; Select a slot number
4063 li r8,0 ; Clear
4064 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4065 oris r8,r8,0x8000 ; Get the high bit on
4066 la r9,pmapSegCache(r25) ; Point to the segment cache
4067 slwi r6,r7,4 ; Get index into the segment cache
4068 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4069 srw r8,r8,r7 ; Get the mask
4070 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4071 li r0,0 ; Clear
4072 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4073 oris r0,r0,0xF000 ; Get the sub-tag mask
4074 add r9,r9,r6 ; Point to the cache slot
4075 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4076 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4077
4078 stw r29,sgcESID(r9) ; Save the top of the ESID
4079 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4080 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4081 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4082 or r10,r10,r5 ; Stick in subtag in case top half
4083 or r11,r11,r5 ; Stick in subtag in case bottom half
4084 stw r14,sgcVSID(r9) ; Save the top of the VSID
4085 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4086 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4087 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4088
4089 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4090 b hpfNoCacheEnt ; Go finish up...
4091
4092hpfSCSTbottom:
4093 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4094
4095
4096hpfNoCacheEnt:
4097 eieio ; Make sure cache is updated before lock
4098 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4099
4100
4101hpfNoCacheEnt2:
4102 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4103 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4104
4105;
4106; Make and enter 32-bit segment register
4107;
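;
;			Illustration only (not assembled): a hedged C sketch of the 32-bit
;			segment register image built below: T=0, the Ks/Kp/N keys in bits
;			1:3, and the low 24 bits of the VSID in bits 8:31 (PowerPC bit
;			numbering, bit 0 being the most significant).
;
;			    #include <stdint.h>
;
;			    static uint32_t make_sr(uint32_t vsid, int ks, int kp, int n)
;			    {
;			        return ((uint32_t)(ks & 1) << 30) |     /* supervisor-state key */
;			               ((uint32_t)(kp & 1) << 29) |     /* user-state key       */
;			               ((uint32_t)(n  & 1) << 28) |     /* no-execute           */
;			               (vsid & 0x00FFFFFFu);            /* 24-bit VSID          */
;			    }
;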
4108
4109 lwz r16,validSegs(r19) ; Get the valid SR flags
4110 xor r12,r12,r4 ; Alter the storage key before loading segment register
4111 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4112 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4113 lis r0,0x8000 ; Set bit 0
4114 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4115 srw r0,r0,r2 ; Get bit corresponding to SR
4116 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4117 or r16,r16,r0 ; Show that SR is valid
4118
4119 mtsrin r6,r30 ; Set the actual SR
4120
4121 stw r16,validSegs(r19) ; Set the valid SR flags
4122
4123 b hpfPteMiss ; SR loaded, go do a PTE...
4124
4125;
4126; Make and enter 64-bit segment look-aside buffer entry.
4127; Note that the cache entry is the right format except for valid bit.
4128; We also need to convert from long long to 64-bit register values.
4129;
4130
4131
4132 .align 5
4133
4134hpfLoadSeg64:
4135 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4136 sldi r8,r29,32 ; Move high order address over
4137 sldi r10,r14,32 ; Move high part of VSID over
4138
4139 not r3,r16 ; Make valids be 0s
4140 li r0,1 ; Prepare to set bit 0
4141
4142 cntlzd r17,r3 ; Find a free SLB
4143 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4144 or r9,r8,r30 ; Form full 64-bit address
4145 cmplwi r17,63 ; Did we find a free SLB entry?
4146 sldi r0,r0,63 ; Get bit 0 set
4147 or r10,r10,r12 ; Move in low part and keys
4148 addi r17,r17,1 ; Skip SLB 0 always
4149 blt++ hpfFreeSeg ; Yes, go load it...
4150
4151;
4152; No free SLB entries, select one that is in use and invalidate it
4153;
4154 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4155 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4156 addi r4,r4,1 ; Set next slot to steal
4157 slbmfee r7,r17 ; Get the entry that is in the selected spot
4158 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4159 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4160 srawi r2,r2,31 ; Get -1 if steal index still in range
4161 slbie r7 ; Invalidate the in-use SLB entry
4162 and r4,r4,r2 ; Reset steal index when it should wrap
4163 isync ;
4164
4165 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4166;
4167; We are now ready to stick the SLB entry in the SLB and mark it in use
4168;
4169
4170hpfFreeSeg:
4171 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4172 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4173 srd r0,r0,r4 ; Set bit mask for allocation
4174 oris r9,r9,0x0800 ; Turn on the valid bit
4175 or r16,r16,r0 ; Turn on the allocation flag
4176 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4177
4178 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4179 slbie r7 ; Blow away a potential duplicate
4180
4181hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4182
4183 std r16,validSegs(r19) ; Mark as valid
4184 b hpfPteMiss ; STE loaded, go do a PTE...
4185
4186;
4187; The segment has been set up and loaded if need be. Now we are ready to build the
4188; PTE and get it into the hash table.
4189;
4190; Note that there is actually a race here. If we start fault processing on
4191; a different pmap, i.e., we have descended into a nested pmap, it is possible
4192; that the nest could have been removed from the original pmap. We would
4193; succeed with this translation anyway. I do not think we need to worry
4194; about this (famous last words) because nobody should be unnesting anything
4195; if there are still people actively using them. It should be up to the
4196; higher level VM system to put the kibosh on this.
4197;
4198; There is also another race here: if we fault on the same mapping on more than
4199; one processor at the same time, we could end up with multiple PTEs for the same
4200; mapping. This is not a good thing.... We really only need one of the
4201; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4202; the mapping. If we see that set, we just abandon the handler and hope that by
4203; the time we restore context and restart the interrupted code, the fault has
4204; been resolved by the other guy. If not, we will take another fault.
4205;
4206
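;
;			Illustration only (not assembled): a hedged C sketch of the
;			fault-in-progress handshake just described: atomically set mpFIP in
;			the mapping flags, and abandon the fault if mpFIP was already set or
;			a PTE is already present (mpHValid). Parameter names are hypothetical.
;
;			    #include <stdint.h>
;			    #include <stdbool.h>
;
;			    static bool claim_fault(volatile uint32_t *mpflags, uint32_t mpte,
;			                            uint32_t fip_bit, uint32_t hvalid_bit)
;			    {
;			        uint32_t old = __atomic_load_n(mpflags, __ATOMIC_RELAXED);
;			        do {
;			            if ((old & fip_bit) || (mpte & hvalid_bit))
;			                return false;                    /* someone else got here first: hpfAbandon */
;			        } while (!__atomic_compare_exchange_n(mpflags, &old, old | fip_bit,
;			                                              0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
;			        return true;                             /* we own this fault now */
;			    }
;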
4207;
4208; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4209; It is required to stay there until after we call mapSelSlot!!!!
4210;
4211
4212 .align 5
4213
4214hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4215 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4216 li r3,mpHValid ; Get the PTE valid bit
4217 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4218 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4219 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4220 and. r12,r12,r3 ; Isolate the valid bit
4221 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4222 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
91447636
A
4223 rlwinm r0,r2,0,mpType ; Isolate mapping type
4224 cmplwi r0,mpBlock ; Is this a block mapping?
4225 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
55e303ae
A
4226 stwcx. r2,0,r31 ; Store the flags
4227 bne-- hpfPteMiss ; Collision, try again...
4228
4229 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4230
4231;
4232; At this point we are about to do the 32-bit PTE generation.
4233;
4234; The following is the R14:R15 pair that contains the "shifted" VSID:
4235;
4236; 1 2 3 4 4 5 6
4237; 0 8 6 4 2 0 8 6 3
4238; +--------+--------+--------+--------+--------+--------+--------+--------+
4239; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4240; +--------+--------+--------+--------+--------+--------+--------+--------+
4241;
4242; The 24 bits of the 32-bit architecture VSID is in the following:
4243;
4244; 1 2 3 4 4 5 6
4245; 0 8 6 4 2 0 8 6 3
4246; +--------+--------+--------+--------+--------+--------+--------+--------+
4247; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4248; +--------+--------+--------+--------+--------+--------+--------+--------+
4249;
4250
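;
;			Illustration only (not assembled): a hedged C sketch of the classic
;			32-bit PowerPC primary PTEG hash that the code below computes (the
;			assembly keeps both operands pre-shifted left by 12, so the XOR
;			yields "hash << 12" directly).
;
;			    #include <stdint.h>
;
;			    static uint32_t pteg_offset(uint32_t vsid, uint32_t ea, uint32_t hash_mask)
;			    {
;			        uint32_t page_index = (ea >> 12) & 0xFFFF;       /* EA bits 4:19          */
;			        uint32_t hash = (vsid & 0x7FFFF) ^ page_index;   /* primary hash function */
;			        return (hash & hash_mask) << 6;                  /* each PTEG is 64 bytes */
;			    }
;
;			The PTEG address is this offset ORed into the hash table base from SDR1.
;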
4251
4252hpfBldPTE32:
4253 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4254 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4255
4256 mfsdr1 r27 ; Get the hash table base address
4257
4258 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4259 rlwinm r18,r23,10,26,31 ; Extract the API
4260 xor r19,r15,r0 ; Calculate hash << 12
4261 mr r2,r25 ; Save the flag part of the mapping
4262 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4263 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4264 rlwinm r25,r25,0,0,19 ; Clear out the flags
4265 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4266 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4267 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4268 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4269 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4270 add r24,r24,r25 ; Adjust to true physical address
4271 rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image
4272 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4273 and r19,r19,r16 ; Wrap hash table offset into the hash table
4274 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4275 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4276 add r19,r19,r27 ; Point to the PTEG
4277 subfic r20,r20,-4 ; Get negative offset to PCA
4278 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4279 add r20,r20,r27 ; Point to the PCA slot
4280
4281;
4282; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4283; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4284;
4285; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4286; that some other processor beat us and stuck in a PTE or that
4287; all we had was a simple segment exception and the PTE was there the whole time.
4288; If we find a pointer, we are done.
4289;
4290
4291 mr r7,r20 ; Copy the PCA pointer
4292 bl mapLockPteg ; Lock the PTEG
4293
4294 lwz r12,mpPte(r31) ; Get the offset to the PTE
4295 mr r17,r6 ; Remember the PCA image
4296 mr r16,r6 ; Prime the post-select PCA image
4297 andi. r0,r12,mpHValid ; Is there a PTE here already?
4298 li r21,8 ; Get the number of slots
4299
4300 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4301
4302 bne- hpfBailOut ; Someone already did this for us...
4303
4304;
4305; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4306; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4307; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4308; R4 returns the slot index.
4309;
4310; REMEMBER: CR7 indicates that we are building a block mapping.
4311;
4312
4313hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4314 mr r6,r17 ; Get back the original PCA
4315 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4316 blt- hpfBailOut ; Holy Cow, all slots are locked...
4317
4318 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4319
4320 cmplwi cr5,r3,1 ; Did we steal a slot?
4321 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4322 mr r16,r6 ; Remember the PCA image after selection
4323 blt+ cr5,hpfInser32 ; Nope, no steal...
4324
4325 lwz r6,0(r19) ; Get the old PTE
4326 lwz r7,4(r19) ; Get the real part of the stealee
4327 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4328 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4329 srwi r3,r7,12 ; Change phys address to a ppnum
4330 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4331 cmplwi cr1,r3,0 ; Check if this is in RAM
4332 bne- hpfNoPte32 ; Could not get it, try for another...
4333
4334 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4335
4336hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4337
4338 sync ; Make sure the invalid is stored
4339 li r9,tlbieLock ; Get the TLBIE lock
4340 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4341
4342hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4343 mfsprg r4,0 ; Get the per_proc
4344 rlwinm r8,r6,25,18,31 ; Extract the space ID
4345 rlwinm r11,r6,25,18,31 ; Extract the space ID
4346 lwz r7,hwSteals(r4) ; Get the steal count
4347 srwi r2,r6,7 ; Align segment number with hash
4348 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4349 mr. r0,r0 ; Is it locked?
4350 srwi r0,r19,6 ; Align PTEG offset for back hash
4351 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4352 xor r11,r11,r0 ; Hash backwards to partial vaddr
4353 rlwinm r12,r2,14,0,3 ; Shift segment up
4354 mfsprg r2,2 ; Get feature flags
4355 li r0,1 ; Get our lock word
4356 rlwimi r12,r6,22,4,9 ; Move up the API
4357 bne- hpfTLBIE32 ; It is locked, go wait...
4358 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4359
4360 stwcx. r0,0,r9 ; Try to get it
4361 bne- hpfTLBIE32 ; We was beat...
4362 addi r7,r7,1 ; Bump the steal count
4363
4364 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4365 li r0,0 ; Lock clear value
4366
4367 tlbie r12 ; Invalidate it everywhere
4368
4369
4370 beq- hpfNoTS32 ; Can not have MP on this machine...
4371
4372 eieio ; Make sure that the tlbie happens first
4373 tlbsync ; Wait for everyone to catch up
4374 sync ; Make sure of it all
4375
4376hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4377
4378 stw r7,hwSteals(r4) ; Save the steal count
4379 bgt cr5,hpfInser32 ; We just stole a block mapping...
4380
4381 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4382
4383 la r11,ppLink+4(r3) ; Point to the master RC copy
4384 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4385 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4386
4387hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4388 or r0,r0,r2 ; Merge in the new RC
4389 stwcx. r0,0,r11 ; Try to stick it back
4390 bne- hpfMrgRC32 ; Try again if we collided...
4391
4392
4393hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4394 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4395
4396 lhz r10,mpSpace(r7) ; Get the space
4397 lwz r9,mpVAddr+4(r7) ; And the vaddr
4398 cmplw cr1,r10,r8 ; Is this one of ours?
4399 xor r9,r12,r9 ; Compare virtual address
4400 cmplwi r9,0x1000 ; See if we really match
4401 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4402 beq+ hpfFPnch2 ; Yes, found ours...
4403
4404 lwz r7,mpAlias+4(r7) ; Chain on to the next
4405 b hpfFPnch ; Check it out...
4406
4407hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4408 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4409 bl mapPhysUnlock ; Unlock the physent now
4410
4411hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4412
4413 stw r24,4(r19) ; Stuff in the real part of the PTE
4414 eieio ; Make sure this gets there first
4415
4416 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4417 mr r17,r16 ; Get the PCA image to save
4418 b hpfFinish ; Go join the common exit code...
4419
4420
4421;
4422; At this point we are about to do the 64-bit PTE generation.
4423;
4424; The following is the R14:R15 pair that contains the "shifted" VSID:
4425;
4426; 1 2 3 4 4 5 6
4427; 0 8 6 4 2 0 8 6 3
4428; +--------+--------+--------+--------+--------+--------+--------+--------+
4429; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4430; +--------+--------+--------+--------+--------+--------+--------+--------+
4431;
4432;
4433
4434 .align 5
4435
4436hpfBldPTE64:
4437 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4438 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4439
4440 mfsdr1 r27 ; Get the hash table base address
4441
4442 sldi r11,r22,32 ; Slide top of adjusted EA over
4443 sldi r14,r14,32 ; Slide top of VSID over
4444 rlwinm r5,r27,0,27,31 ; Isolate the size
4445 eqv r16,r16,r16 ; Get all foxes here
4446 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4447 mr r2,r10 ; Save the flag part of the mapping
4448 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4449 rldicr r27,r27,0,45 ; Clean up the hash table base
4450 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4451 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4452 subfic r5,r5,46 ; Get number of leading zeros
4453 xor r19,r0,r15 ; Calculate hash
4454 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4455 srd r16,r16,r5 ; Shift over to get length of table
4456 srdi r19,r19,5 ; Convert page offset to hash table offset
4457 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4458 rldicr r10,r10,0,51 ; Clear out flags
4459 sldi r24,r24,12 ; Change ppnum to physical address
4460 sub r11,r11,r10 ; Get the offset from the base mapping
4461 and r19,r19,r16 ; Wrap into hash table
4462 add r24,r24,r11 ; Get actual physical address of this page
4463 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4464 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4465 subfic r20,r20,-4 ; Get negative offset to PCA
4466 ori r24,r24,lo16(mpR) ; Force on the reference bit
4467 add r20,r20,r27 ; Point to the PCA slot
4468 add r19,r19,r27 ; Point to the PTEG
4469
4470;
4471; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4472; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4473;
4474; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4475; that some other processor beat us and stuck in a PTE or that
4476; all we had was a simple segment exception and the PTE was there the whole time.
4477; If we find a pointer, we are done.
4478;
4479
4480 mr r7,r20 ; Copy the PCA pointer
4481 bl mapLockPteg ; Lock the PTEG
4482
4483 lwz r12,mpPte(r31) ; Get the offset to the PTE
4484 mr r17,r6 ; Remember the PCA image
4485 mr r18,r6 ; Prime post-selection PCA image
4486 andi. r0,r12,mpHValid ; See if we have a PTE now
4487 li r21,8 ; Get the number of slots
4488
4489 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4490
4491 bne-- hpfBailOut ; Someone already did this for us...
4492
4493;
4494; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
4495; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4496; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4497; R4 returns the slot index.
4498;
4499; REMEMBER: CR7 indicates that we are building a block mapping.
4500;
4501
4502hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4503 mr r6,r17 ; Restore original state of PCA
4504 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4505 blt- hpfBailOut ; Holy Cow, all slots are locked...
4506
4507 bl mapSelSlot ; Go select a slot
4508
4509 cmplwi cr5,r3,1 ; Did we steal a slot?
4510 mr r18,r6 ; Remember the PCA image after selection
4511 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4512 lwz r10,hwSteals(r2) ; Get the steal count
4513 blt++ cr5,hpfInser64 ; Nope, no steal...
4514
4515 ld r6,0(r19) ; Get the old PTE
4516 ld r7,8(r19) ; Get the real part of the stealee
4517 rldicr r6,r6,0,62 ; Clear the valid bit
4518 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4519 srdi r3,r7,12 ; Change physical address to a ppnum
4520 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4521 cmplwi cr1,r3,0 ; Check if this is in RAM
4522 bne-- hpfNoPte64 ; Could not get it, try for another...
4523
4524 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4525
4526hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4527 li r9,tlbieLock ; Get the TLBIE lock
4528
4529 srdi r11,r6,5 ; Shift VSID over for back hash
4530 mfsprg r4,0 ; Get the per_proc
4531 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4532 sync ; Make sure the invalid is stored
4533
4534 sldi r12,r6,16 ; Move AVPN to EA position
4535 sldi r11,r11,5 ; Move this to the page position
4536
4537hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4538 mr. r0,r0 ; Is it locked?
4539 li r0,1 ; Get our lock word
4540 bne-- hpfTLBIE65 ; It is locked, go wait...
4541
4542 stwcx. r0,0,r9 ; Try to get it
4543 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4544 rldicl r8,r6,52,50 ; Isolate the address space ID
4545 bne-- hpfTLBIE64 ; We was beat...
4546 addi r10,r10,1 ; Bump the steal count
4547
4548 rldicl r11,r12,0,16 ; Clear cause the book says so
4549 li r0,0 ; Lock clear value
4550
4551 tlbie r11 ; Invalidate it everywhere
4552
4553 mr r7,r8 ; Get a copy of the space ID
4554 eieio ; Make sure that the tlbie happens first
4555 rldimi r7,r7,14,36 ; Copy address space to make hash value
4556 tlbsync ; Wait for everyone to catch up
4557 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4558 srdi r2,r6,26 ; Shift original segment down to bottom
4559
4560 ptesync ; Make sure of it all
4561 xor r7,r7,r2 ; Compute original segment
4562 stw r0,tlbieLock(0) ; Clear the tlbie lock
4563
4564 stw r10,hwSteals(r4) ; Save the steal count
4565 bgt cr5,hpfInser64 ; We just stole a block mapping...
4566
4567 rldimi r12,r7,28,0 ; Insert decoded segment
4568 rldicl r4,r12,0,13 ; Trim to max supported address
4569
4570 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4571
4572 la r11,ppLink+4(r3) ; Point to the master RC copy
4573 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4574 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4575
4576hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4577 li r12,ppLFAmask ; Get mask to clean up alias pointer
4578 or r0,r0,r2 ; Merge in the new RC
4579 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4580 stwcx. r0,0,r11 ; Try to stick it back
4581 bne-- hpfMrgRC64 ; Try again if we collided...
4582
4583hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4584 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4585
4586 lhz r10,mpSpace(r7) ; Get the space
4587 ld r9,mpVAddr(r7) ; And the vaddr
4588 cmplw cr1,r10,r8 ; Is this one of ours?
4589 xor r9,r4,r9 ; Compare virtual address
4590 cmpldi r9,0x1000 ; See if we really match
4591 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4592 beq++ hpfFPnch2x ; Yes, found ours...
4593
4594 ld r7,mpAlias(r7) ; Chain on to the next
4595 b hpfFPnchx ; Check it out...
4596
4597 .align 5
4598
4599hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4600 stwcx. r7,0,r7 ; Kill reservation
4601
4602hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4603 mr. r0,r0 ; Is it locked?
4604 beq++ hpfTLBIE64 ; Yup, wait for it...
4605 b hpfTLBIE63 ; Nope, try again..
4606
4607
4608
4609hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4610 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4611 bl mapPhysUnlock ; Unlock the physent now
4612
4613
4614hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4615 eieio ; Make sure this gets there first
4616 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4617 mr r17,r18 ; Get the PCA image to set
4618 b hpfFinish ; Go join the common exit code...
4619
4620hpfLostPhys:
4621 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4622 ori r0,r0,lo16(Choke) ; System abend
4623 sc
4624
4625;
4626; This is the common code we execute when we are finished setting up the PTE.
4627;
4628
4629 .align 5
4630
4631hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4632 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4633 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4634 stw r4,mpPte(r31) ; Remember our PTE
4635
4636hpfBailOut: eieio ; Make sure all updates come first
4637 stw r17,0(r20) ; Unlock and set the final PCA
4638
4639;
4640; This is where we go if we have started processing the fault, but find that someone
4641; else has taken care of it.
4642;
4643
4644hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4645 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4646 sth r2,mpFlags+2(r31) ; Set it
4647
4648 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4649 bl sxlkUnlock ; Unlock the search list
4650
4651 li r11,T_IN_VAIN ; Say that it was handled
4652 b EXT(PFSExit) ; Leave...
4653
4654;
4655; This is where we go when we find that someone else
4656; is in the process of handling the fault.
4657;
4658
4659hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4660 stwcx. r3,0,r3 ; Do it
4661
4662 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4663 bl sxlkUnlock ; Unlock the search list
4664
4665 li r11,T_IN_VAIN ; Say that it was handled
4666 b EXT(PFSExit) ; Leave...
4667
4668;
4669; Guest shadow assist -- page fault handler
4670;
4671; Here we handle a fault in a guest pmap that has the guest shadow mapping
4672; assist active. We locate the VMM pmap extension block, which contains an
4673; index over the discontiguous multi-page shadow hash table. The index
4674; corresponding to our vaddr is selected, and the selected group within
4675; that page is searched for a valid and active entry that contains
4676; our vaddr and space id. The search is pipelined, so that we may fetch
4677; the next slot while examining the current slot for a hit. The final
4678; search iteration is unrolled so that we don't fetch beyond the end of
4679; our group, which could have dire consequences depending upon where the
4680; physical hash page is located.
4681;
4682; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4683; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4684; after the pmap_vmm_ext is the hash table physical address index, a
4685; linear list of 64-bit physical addresses of the pages that comprise
4686; the hash table.
4687;
4688; In the event that we successfully locate a guest mapping, we re-join
4689; the page fault path at hpfGVfound with the mapping's address in r31;
4690; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4691; a share of the pmap search lock for the host pmap with the host pmap's
4692; address in r28, the guest pmap's space id in r21, and the guest pmap's
4693; flags in r12.
4694;
4695
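;
; Conceptually, the shadow hash search below is (illustrative C; the GV_* derived
; shift/mask helpers and slot layout names are stand-ins for the real ones):
;
;     uint32_t hash = space_id ^ (guest_va >> 12);             // shadow hash
;     uint64_t page = hash_page_index[page_index_of(hash)];    // physical hash page
;     slot_t *grp   = (slot_t *)(page + group_offset(hash));   // group of GV_SLOTS slots
;     for (int i = 0; i < GV_SLOTS; i++, grp++)                // pipelined/unrolled below
;         if (!(grp->flags & (mpgFree | mpgDormant)) &&
;             grp->space == space_id &&
;             (grp->va & ~0xFFFULL) == (guest_va & ~0xFFFULL))
;             return grp;                                      // hit: rejoin at hpfGVfound
;     return NULL;                                             // miss: rejoin at hpfNotFound
;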
4696 .align 5
4697hpfGVxlate:
4698 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4699
4700 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4701 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4702 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4703 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4704 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4705 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4706 lwz r6,vxsGpf(r11) ; Get guest fault count
4707
4708 srwi r3,r10,12 ; Form shadow hash:
4709 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4710 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4711 ; Form index offset from hash page number
4712 add r31,r31,r4 ; r31 <- hash page index entry
4713 lwz r31,4(r31) ; r31 <- hash page paddr
4714 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4715 ; r31 <- hash group paddr
4716
4717 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4718 bl sxlkShared ; Go get a shared lock on the mapping lists
4719 mr. r3,r3 ; Did we get the lock?
4720 bne- hpfBadLock ; Nope...
4721
4722 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4723 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4724 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4725 addi r6,r6,1 ; Increment guest fault count
4726 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4727 mtctr r0 ; in this group
4728 stw r6,vxsGpf(r11) ; Update guest fault count
4729 b hpfGVlp32
4730
4731 .align 5
4732hpfGVlp32:
4733 mr r6,r3 ; r6 <- current mapping slot's flags
4734 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4735 mr r7,r4 ; r7 <- current mapping slot's space ID
4736 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4737 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4738 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4739 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4740 xor r7,r7,r21 ; Compare space ID
4741 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4742 xor r8,r8,r10 ; Compare virtual address
4743 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4744 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4745
4746 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4747 bdnz hpfGVlp32 ; Iterate
4748
4749 clrrwi r5,r5,12 ; Remove flags from virtual address
4750 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4751 xor r4,r4,r21 ; Compare space ID
4752 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4753 xor r5,r5,r10 ; Compare virtual address
4754 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4755 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4756
4757 b hpfGVmiss
4758
4759 .align 5
4760hpfGV64:
4761 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4762 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4763 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4764 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4765 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4766 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4767 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4768 lwz r6,vxsGpf(r11) ; Get guest fault count
4769
4770 srwi r3,r10,12 ; Form shadow hash:
4771 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4772 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4773 ; Form index offset from hash page number
4774 add r31,r31,r4 ; r31 <- hash page index entry
4775 ld r31,0(r31) ; r31 <- hash page paddr
4776 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4777 ; r31 <- hash group paddr
4778
4779 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4780 bl sxlkShared ; Go get a shared lock on the mapping lists
4781 mr. r3,r3 ; Did we get the lock?
4782 bne-- hpfBadLock ; Nope...
4783
4784 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4785 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4786 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4787 addi r6,r6,1 ; Increment guest fault count
4788 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4789 mtctr r0 ; in this group
4790 stw r6,vxsGpf(r11) ; Update guest fault count
4791 b hpfGVlp64
4792
4793 .align 5
4794hpfGVlp64:
4795 mr r6,r3 ; r6 <- current mapping slot's flags
4796 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4797 mr r7,r4 ; r7 <- current mapping slot's space ID
4798 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4799 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4800 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4801 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4802 xor r7,r7,r21 ; Compare space ID
4803 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4804 xor r8,r8,r10 ; Compare virtual address
4805 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4806 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4807
4808 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4809 bdnz hpfGVlp64 ; Iterate
4810
4811 clrrdi r5,r5,12 ; Remove flags from virtual address
4812 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4813 xor r4,r4,r21 ; Compare space ID
4814 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4815 xor r5,r5,r10 ; Compare virtual address
4816 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4817 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4818
4819hpfGVmiss:
4820 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4821 addi r6,r6,1 ; Increment miss count
4822 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
4823 b hpfNotFound
4824
4825/*
4826 * hw_set_user_space(pmap)
4827 * hw_set_user_space_dis(pmap)
4828 *
4829 * Indicate whether memory space needs to be switched.
4830 * We really need to turn off interrupts here, because we need to be non-preemptable
4831 *
4832 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4833 * register usage here. The VMM switch code in vmachmon.s that calls this
4834 * knows what registers are in use. Check there if these change.
4835 */
4836
4837
4838
4839 .align 5
4840 .globl EXT(hw_set_user_space)
4841
4842LEXT(hw_set_user_space)
4843
4844 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4845 mfmsr r10 ; Get the current MSR
4846 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4847 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4848 andc r10,r10,r8 ; Turn off VEC, FP for good
4849 andc r9,r10,r9 ; Turn off EE also
4850 mtmsr r9 ; Disable them
4851 isync ; Make sure FP and vec are off
4852 mfsprg r6,1 ; Get the current activation
4853 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4854 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4855 mfsprg r4,2 ; Get the feature flags
4856 lwz r7,pmapvr(r3) ; Get the v to r translation
4857 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4858 mtcrf 0x80,r4 ; Get the Altivec flag
4859 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4860 cmplw cr1,r3,r2 ; Same address space as before?
4861 stw r7,ppUserPmap(r6) ; Show our real pmap address
4862 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4863 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4864 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4865 mtmsr r10 ; Restore interruptions
4866 beqlr-- cr1 ; Leave if the same address space or not Altivec
4867
4868 dssall ; Need to kill all data streams if adrsp changed
4869 sync
4870 blr ; Return...
4871
4872 .align 5
4873 .globl EXT(hw_set_user_space_dis)
4874
4875LEXT(hw_set_user_space_dis)
4876
4877 lwz r7,pmapvr(r3) ; Get the v to r translation
4878 mfsprg r4,2 ; Get the feature flags
4879 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4880 mfsprg r6,1 ; Get the current activation
4881 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4882 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4883 mtcrf 0x80,r4 ; Get the Altivec flag
4884 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4885 cmplw cr1,r3,r2 ; Same address space as before?
4886 stw r7,ppUserPmap(r6) ; Show our real pmap address
4887 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4888 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4889 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4890 beqlr-- cr1 ; Leave if the same
4891
4892 dssall ; Need to kill all data streams if adrsp changed
4893 sync
4894 blr ; Return...
4895
4896/* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4897 *
4898 * Lock must already be held on mapping block list
4899 * returns 0 if all slots filled.
4900 * returns n if a slot is found and it is not the last
4901 * returns -n if a slot is found and it is the last
4902 * when n and -n are returned, the corresponding bit is cleared
4903 * the mapping is zeroed out before return
4904 *
4905 */
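;
; In C terms, the allocator below does approximately this (sketch only; the helper
; names are illustrative):
;
;     uint64_t free = mb->mbfree;                        // 64-bit free-slot bitmap
;     if (free == 0) return 0;                           // all slots taken
;     int n = clz64(free);                               // index of first free slot
;     free &= ~(0x8000000000000000ULL >> n);             // claim it
;     mb->mbfree = free;
;     bzero((char *)mb + (n << 6), 64);                  // zero the 64-byte mapping
;     return (free == 0) ? -n : n;                       // negative means it was the last one
;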
4906
4907 .align 5
4908 .globl EXT(mapalc1)
4909
4910LEXT(mapalc1)
4911 lwz r4,mbfree(r3) ; Get the 1st mask
4912 lis r0,0x8000 ; Get the mask to clear the first free bit
4913 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4914 mr r12,r3 ; Save the block ptr
4915 cntlzw r3,r4 ; Get first 1-bit in 1st word
4916 srw. r9,r0,r3 ; Get bit corresponding to first free one
4917 cntlzw r10,r5 ; Get first free field in second word
4918 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4919 bne mapalc1f ; Found one in 1st word
4920
4921 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4922 li r3,0 ; assume failure return
4923 andc r5,r5,r9 ; Turn it off
4924 beqlr-- ; There are no 1 bits left...
4925 addi r3,r10,32 ; set the correct number
4926
4927mapalc1f:
4928 or. r0,r4,r5 ; any more bits set?
4929 stw r4,mbfree(r12) ; update bitmasks
4930 stw r5,mbfree+4(r12)
4931
4932 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4933 addi r7,r6,32
4934 dcbz r6,r12 ; clear the 64-byte mapping
4935 dcbz r7,r12
4936
4937 bnelr++ ; return if another bit remains set
4938
4939 neg r3,r3 ; indicate we just returned the last bit
4940 blr
4941
4942
4943/* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4944 *
4945 * Lock must already be held on mapping block list
4946 * returns 0 if all slots filled.
4947 * returns n if a slot is found and it is not the last
4948 * returns -n if a slot is found and it is the last
4949 * when n and -n are returned, the corresponding bits are cleared
4950 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4951 * the mapping is zero'd out before return
4952 */
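;
; The n & (n << 1) trick: a bit survives in (n & (n << 1)) only if it and its less
; significant neighbor are both set, so a count-leading-zeros of that value locates
; the first run of two adjacent free slots. Roughly (illustrative C):
;
;     uint32_t runs = n & (n << 1);            // marks the left bit of every "11" pair
;     int idx = clz32(runs);                   // 32 means no such pair in this word
;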
4953
4954 .align 5
4955 .globl EXT(mapalc2)
4956LEXT(mapalc2)
4957 lwz r4,mbfree(r3) ; Get the first mask
4958 lis r0,0x8000 ; Get the mask to clear the first free bit
4959 lwz r5,mbfree+4(r3) ; Get the second mask
4960 mr r12,r3 ; Save the block ptr
4961 slwi r6,r4,1 ; shift first word over
4962 and r6,r4,r6 ; lite start of double bit runs in 1st word
4963 slwi r7,r5,1 ; shift 2nd word over
4964 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4965 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4966 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4967 cntlzw r10,r7 ; Get first free field in second word
4968 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4969 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4970 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4971 bne mapalc2a ; Found two consecutive free bits in 1st word
4972
4973 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4974 li r3,0 ; assume failure
4975 srwi r11,r9,1 ; get mask for 2nd bit
4976 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4977 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4978 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4979 addi r3,r10,32 ; set the correct number
4980
4981mapalc2a:
4982 or. r0,r4,r5 ; any more bits set?
4983 stw r4,mbfree(r12) ; update bitmasks
4984 stw r5,mbfree+4(r12)
4985 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4986 addi r7,r6,32
4987 addi r8,r6,64
4988 addi r9,r6,96
4989 dcbz r6,r12 ; zero out the 128-byte mapping
4990 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4991 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
4992 dcbz r9,r12
4993
4994 bnelr++ ; return if another bit remains set
4995
4996 neg r3,r3 ; indicate we just returned the last bit
4997 blr
4998
4999mapalc2c:
5000 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5001 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5002 beqlr ; no, we failed
5003 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5004 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5005 li r3,31 ; get index of this field
5006 b mapalc2a
5007
5008
5009;
5010; This routine initializes the hash table and PCA.
5011; It is done here because we may need to be 64-bit to do it.
5012;
5013
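;
; Conceptually (illustrative C, not the real interface): the table is zeroed one cache
; line at a time, and one 4-byte PCA word per PTEG is written just below the table
; base, growing downward:
;
;     for (off = 0; off < hash_table_size; off += linesize)
;         clear_dcache_line(hash_table_base + off);            // zero the PTEGs
;     for (i = 1; i <= hash_table_size / 64; i++)              // one PCA entry per 64-byte PTEG
;         *(uint32_t *)(hash_table_base - 4 * i) = 0xFF010000; // all 8 slots free, steal
;                                                              // pointer starting at the end
;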
5014 .align 5
5015 .globl EXT(hw_hash_init)
5016
5017LEXT(hw_hash_init)
5018
5019 mfsprg r10,2 ; Get feature flags
5020 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5021 mtcrf 0x02,r10 ; move pf64Bit to cr6
5022 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5023 lis r4,0xFF01 ; Set all slots free and start steal at end
5024 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5025 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5026
5027 lwz r12,0(r12) ; Get hash table size
5028 li r3,0 ; Get start
5029 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5030
5031 lwz r11,4(r11) ; Get hash table base
5032
5033hhiNext32: cmplw r3,r12 ; Have we reached the end?
5034 bge- hhiCPCA32 ; Yes...
5035 dcbz r3,r11 ; Clear the line
5036 addi r3,r3,32 ; Next one...
5037 b hhiNext32 ; Go on...
5038
5039hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5040 li r3,-4 ; Displacement to first PCA entry
5041 neg r12,r12 ; Get negative end of PCA
5042
5043hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5044 subi r3,r3,4 ; Next slot
5045 cmpw r3,r12 ; Have we finished?
5046 bge+ hhiNPCA32 ; Not yet...
5047 blr ; Leave...
5048
5049hhiSF: mfmsr r9 ; Save the MSR
5050 li r8,1 ; Get a 1
5051 mr r0,r9 ; Get a copy of the MSR
5052 ld r11,0(r11) ; Get hash table base
5053 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5054 mtmsrd r0 ; Turn on SF
5055 isync
5056
5057
5058hhiNext64: cmpld r3,r12 ; Have we reached the end?
5059 bge-- hhiCPCA64 ; Yes...
5060 dcbz128 r3,r11 ; Clear the line
5061 addi r3,r3,128 ; Next one...
5062 b hhiNext64 ; Go on...
5063
5064hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5065 li r3,-4 ; Displacement to first PCA entry
5066 neg r12,r12 ; Get negative end of PCA
5067
5068hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5069 subi r3,r3,4 ; Next slot
5070 cmpd r3,r12 ; Have we finished?
5071 bge++ hhiNPCA64 ; Not yet...
5072
5073 mtmsrd r9 ; Turn off SF if it was off
5074 isync
5075 blr ; Leave...
5076
5077
5078;
5079; This routine sets up the hardware to start translation.
5080; Note that we do NOT start translation.
5081;
5082
5083 .align 5
5084 .globl EXT(hw_setup_trans)
5085
5086LEXT(hw_setup_trans)
5087
5088 mfsprg r11,0 ; Get the per_proc block
5089 mfsprg r12,2 ; Get feature flags
5090 li r0,0 ; Get a 0
5091 li r2,1 ; And a 1
5092 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5093 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5094 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5095 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5096 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5097
5098 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5099
5100 li r9,0 ; Clear out a register
5101 sync
5102 isync
5103 mtdbatu 0,r9 ; Invalidate maps
5104 mtdbatl 0,r9 ; Invalidate maps
5105 mtdbatu 1,r9 ; Invalidate maps
5106 mtdbatl 1,r9 ; Invalidate maps
5107 mtdbatu 2,r9 ; Invalidate maps
5108 mtdbatl 2,r9 ; Invalidate maps
5109 mtdbatu 3,r9 ; Invalidate maps
5110 mtdbatl 3,r9 ; Invalidate maps
5111
5112 mtibatu 0,r9 ; Invalidate maps
5113 mtibatl 0,r9 ; Invalidate maps
5114 mtibatu 1,r9 ; Invalidate maps
5115 mtibatl 1,r9 ; Invalidate maps
5116 mtibatu 2,r9 ; Invalidate maps
5117 mtibatl 2,r9 ; Invalidate maps
5118 mtibatu 3,r9 ; Invalidate maps
5119 mtibatl 3,r9 ; Invalidate maps
5120
5121 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5122 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5123 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5124 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5125 lwz r11,4(r11) ; Get hash table base
5126 lwz r12,0(r12) ; Get hash table size
5127 subi r12,r12,1 ; Back off by 1
5128 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5129
5130 mtsdr1 r11 ; Ok, we now have the hash table set up
5131 sync
5132
5133 li r12,invalSpace ; Get the invalid segment value
5134 li r10,0 ; Start low
5135
5136hstsetsr: mtsrin r12,r10 ; Set the SR
5137 addis r10,r10,0x1000 ; Bump the segment
5138 mr. r10,r10 ; Are we finished?
5139 bne+ hstsetsr ; Nope...
5140 sync
5141 blr ; Return...
5142
5143;
5144; 64-bit version
5145;
5146
5147hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5148 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5149 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5150 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5151 ld r11,0(r11) ; Get hash table base
5152 lwz r12,0(r12) ; Get hash table size
5153 cntlzw r10,r12 ; Get the number of bits
5154 subfic r10,r10,13 ; Get the extra bits we need
5155 or r11,r11,r10 ; Add the size field to SDR1
5156
5157 mtsdr1 r11 ; Ok, we now have the hash table set up
5158 sync
5159
5160 li r0,0 ; Set an SLB slot index of 0
5161 slbia ; Trash all SLB entries (except for entry 0 that is)
5162 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5163 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5164 slbie r7 ; Invalidate it
5165
5166 blr ; Return...
5167
5168
5169;
5170; This routine turns on translation for the first time on a processor
5171;
5172
5173 .align 5
5174 .globl EXT(hw_start_trans)
5175
5176LEXT(hw_start_trans)
5177
5178
5179 mfmsr r10 ; Get the msr
5180 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5181
5182 mtmsr r10 ; Everything falls apart here
5183 isync
5184
5185 blr ; Back to it.
5186
5187
5188
5189;
5190; This routine validates a segment register.
5191; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5192;
5193; r3 = virtual pmap
5194; r4 = segment[0:31]
5195; r5 = segment[32:63]
5196; r6 = va[0:31]
5197; r7 = va[32:63]
5198;
5199; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5200; Note that there is no reason to apply the key modifier here because this is only
5201; used for kernel accesses.
5202;
5203
5204 .align 5
5205 .globl EXT(hw_map_seg)
5206
5207LEXT(hw_map_seg)
5208
5209 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5210 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5211 mfsprg r10,2 ; Get feature flags
5212
5213;
5214; Note: the following code would probably be easier to follow if I split it,
5215; but I just wanted to see if I could write this to work on both 32- and 64-bit
5216; machines combined.
5217;
5218
5219;
5220; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5221; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5222
5223 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5224 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5225 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5226 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5227 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5228 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5229 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5230 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5231 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5232 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5233
5234 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5235 ; concatenated together. There is garbage
5236 ; at the top for 64-bit but we will clean
5237 ; that out later.
5238 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5239
5240
5241;
5242; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5243; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5244;
5245
5246;
5247; What we have now is:
5248;
5249; 0 0 1 2 3 4 4 5 6
5250; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5251; +--------+--------+--------+--------+--------+--------+--------+--------+
5252; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5253; +--------+--------+--------+--------+--------+--------+--------+--------+
5254; 0 0 1 2 3 - for 32-bit machines
5255; 0 8 6 4 1
5256;
5257; 0 0 1 2 3 4 4 5 6
5258; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5259; +--------+--------+--------+--------+--------+--------+--------+--------+
5260; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5261; +--------+--------+--------+--------+--------+--------+--------+--------+
5262; 0 0 1 2 3 - for 32-bit machines
5263; 0 8 6 4 1
5264;
5265; 0 0 1 2 3 4 4 5 6
5266; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5267; +--------+--------+--------+--------+--------+--------+--------+--------+
5268; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5269; +--------+--------+--------+--------+--------+--------+--------+--------+
5270; 0 0 1 2 3 - for 32-bit machines
5271; 0 8 6 4 1
5272
5273
5274 xor r8,r8,r2 ; Calculate VSID
5275
5276 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5277 mfsprg r12,0 ; Get the per_proc
5278 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5279 mfmsr r6 ; Get current MSR
5280 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5281 mtmsrd r0,1 ; Set only the EE bit to 0
5282 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5283 mfmsr r11 ; Get the MSR right now, after disabling EE
5284 andc r2,r11,r2 ; Turn off translation now
5285 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5286 or r11,r11,r6 ; Turn on the EE bit if it was on
5287 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5288 isync ; Hang out a bit
5289
5290 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5291 sldi r9,r9,9 ; Position the key and noex bit
5292
5293 rldimi r5,r8,12,0 ; Form the VSID/key
5294
5295 not r3,r6 ; Make valids be 0s
5296
5297 cntlzd r7,r3 ; Find a free SLB
5298 cmplwi r7,63 ; Did we find a free SLB entry?
5299
5300 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5301
5302 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5303 addi r7,r7,1 ; Make sure we skip slb 0
5304 blt++ hmsFreeSeg ; Yes, go load it...
5305
5306;
5307; No free SLB entries, select one that is in use and invalidate it
5308;
5309 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5310 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5311 addi r2,r2,1 ; Set next slot to steal
5312 slbmfee r3,r7 ; Get the entry that is in the selected spot
5313 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5314 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5315 srawi r8,r8,31 ; Get -1 if steal index still in range
5316 slbie r3 ; Invalidate the in-use SLB entry
5317 and r2,r2,r8 ; Reset steal index when it should wrap
5318 isync ;
5319
5320 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5321;
5322; We are now ready to stick the SLB entry in the SLB and mark it in use
5323;
5324
5325hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5326 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5327 srd r0,r0,r2 ; Set bit mask for allocation
5328 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5329 or r6,r6,r0 ; Turn on the allocation flag
5330
5331 slbmte r5,r4 ; Make that SLB entry
5332
5333 std r6,validSegs(r12) ; Mark as valid
5334 mtmsrd r11 ; Restore the MSR
5335 isync
5336 blr ; Back to it...
5337
5338 .align 5
5339
5340hms32bit:
5341 mfsprg r12,1 ; Get the current activation
5342 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5343 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5344 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5345 lis r0,0x8000 ; Set bit 0
5346 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5347 srw r0,r0,r2 ; Get bit corresponding to SR
5348 addi r7,r12,validSegs ; Point to the valid segment flags directly
5349
5350 mtsrin r8,r4 ; Set the actual SR
5351 isync ; Need to make sure this is done
5352
5353hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5354 or r6,r6,r0 ; Show that SR is valid
5355 stwcx. r6,0,r7 ; Set the valid SR flags
5356 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5357
5358 blr ; Back to it...
5359
5360
5361;
5362; This routine invalidates a segment register.
5363;
5364
5365 .align 5
5366 .globl EXT(hw_blow_seg)
5367
5368LEXT(hw_blow_seg)
5369
5370 mfsprg r10,2 ; Get feature flags
5371 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5372
5373 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5374
5375 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5376
5377 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5378 mfmsr r6 ; Get current MSR
5379 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5380 mtmsrd r0,1 ; Set only the EE bit to 0
5381 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5382 mfmsr r11 ; Get the MSR right now, after disabling EE
5383 andc r2,r11,r2 ; Turn off translation now
5384 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5385 or r11,r11,r6 ; Turn on the EE bit if it was on
5386 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5387 isync ; Hang out a bit
5388
5389 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5390
5391 slbie r9 ; Invalidate the associated SLB entry
5392
5393 mtmsrd r11 ; Restore the MSR
5394 isync
5395 blr ; Back to it.
5396
5397 .align 5
5398
5399hbs32bit:
5400 mfsprg r12,1 ; Get the current activation
5401 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5402 addi r7,r12,validSegs ; Point to the valid segment flags directly
5403 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5404 rlwinm r6,r9,4,28,31 ; Convert segment to number
5405 lis r2,0x8000 ; Set up a mask
5406 srw r2,r2,r6 ; Make a mask
5407 and. r0,r4,r2 ; See if this is even valid
5408 li r5,invalSpace ; Set the invalid address space VSID
5409 beqlr ; Leave if already invalid...
5410
5411 mtsrin r5,r9 ; Slam the segment register
5412 isync ; Need to make sure this is done
5413
5414hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5415 stwcx. r4,0,r7 ; Set the valid SR flags
5416 beqlr++ ; Stored ok, no interrupt, time to leave...
5417
5418 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5419 b hbsrupt ; Try again...
5420
5421;
5422; This routine invalidates the entire pmap segment cache
5423;
5424; Translation is on, interrupts may or may not be enabled.
5425;
5426
5427 .align 5
5428 .globl EXT(invalidateSegs)
5429
5430LEXT(invalidateSegs)
5431
5432 la r10,pmapCCtl(r3) ; Point to the segment cache control
5433 eqv r2,r2,r2 ; Get all foxes
5434
5435isInv: lwarx r4,0,r10 ; Get the segment cache control value
5436 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5437 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5438 bne-- isInv0 ; Yes, try again...
5439
5440 stwcx. r4,0,r10 ; Try to invalidate it
5441 bne-- isInv ; Someone else just stuffed it...
5442 blr ; Leave...
5443
5444
5445isInv0: li r4,lgKillResv ; Get reservation kill zone
5446 stwcx. r4,0,r4 ; Kill reservation
5447
5448isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5449 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5450 bne-- isInv ; Nope...
5451 b isInv1 ; Still locked do it again...
5452
5453;
5454; This routine switches segment registers between kernel and user.
5455; We have some assumptions and rules:
5456; We are in the exception vectors
5457; pf64Bitb is set up
5458; R3 contains the MSR we are going to
5459; We can not use R4, R13, R20, R21, R29
5460; R13 is the savearea
5461; R29 has the per_proc
5462;
5463; We return R3 as 0 if we did not switch between kernel and user
5464; We also maintain and apply the user state key modifier used by VMM support;
5465; If we go to the kernel it is set to 0, otherwise it follows the bit
5466; in spcFlags.
5467;
5468
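;
; The pmap and key selection below uses a branch-free select mask instead of a branch.
; Roughly (illustrative C; variable names are stand-ins):
;
;     uint32_t mask = 0 - going_to_user;                          // 0x00000000 kernel, 0xFFFFFFFF user
;     pmap_pa = (kernel_pmap_pa & ~mask) | (user_pmap_pa & mask); // pick the pmap without branching
;     key     = user_key_modifier & mask;                         // key modifier forced to 0 for kernel
;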
5469 .align 5
5470 .globl EXT(switchSegs)
5471
5472LEXT(switchSegs)
5473
5474 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5475 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5476 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5477 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5478 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5479 or r2,r2,r3 ; This will be 1 if we will be using user segments
5480 li r3,0 ; Get a selection mask
5481 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5482 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5483 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5484 la r19,ppUserPmap(r29) ; Point to the current user pmap
5485
5486; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5487 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5488
5489 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5490 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5491 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5492 or r8,r8,r19 ; Get the pointer to the pmap we are using
5493
5494 beqlr ; We are staying in the same mode, do not touch segs...
5495
5496 lwz r28,0(r8) ; Get top half of pmap address
5497 lwz r10,4(r8) ; Get bottom half
5498
5499 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5500 rlwinm r28,r28,0,1,0 ; Copy top to top
5501 stw r30,ppMapFlags(r29) ; Set the key modifier
5502 rlwimi r28,r10,0,0,31 ; Insert bottom
5503
5504 la r10,pmapCCtl(r28) ; Point to the segment cache control
5505 la r9,pmapSegCache(r28) ; Point to the segment cache
5506
5507ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5508 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5509 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5510 bne-- ssgLock0 ; Yup, this is in use...
5511
5512 stwcx. r16,0,r10 ; Try to set the lock
5513 bne-- ssgLock ; Did we get contention?
5514
5515 not r11,r15 ; Invert the invalids to valids
5516 li r17,0 ; Set a mask for the SRs we are loading
5517 isync ; Make sure we are all caught up
5518
5519 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5520
5521 li r0,0 ; Clear
5522 slbia ; Trash all SLB entries (except for entry 0 that is)
5523 li r17,1 ; Get SLB index to load (skip slb 0)
5524 oris r0,r0,0x8000 ; Get set for a mask
5525 b ssg64Enter ; Start on a cache line...
5526
5527 .align 5
5528
5529ssgLock0: li r15,lgKillResv ; Killing field
5530 stwcx. r15,0,r15 ; Kill reservation
5531
5532ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5533 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5534 beq++ ssgLock ; Lock is free now, go try to grab it...
5535 b ssgLock1 ; Still locked, check again...
5536;
5537; This is the 32-bit address space switch code.
5538; We take a reservation on the segment cache and walk through.
5539; For each entry, we load the specified entries and remember which
5540; we did with a mask. Then, we figure out which segments should be
5541; invalid and then see which actually are. Then we load those with the
5542; defined invalid VSID.
5543; Afterwards, we unlock the segment cache.
5544;
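;
; In outline (illustrative C-style pseudocode; not the real interfaces):
;
;     loaded = 0;
;     for (each valid entry e in the segment cache) {
;         sr = make_sr(e.vsid ^ key_modifier, e.keys);     // build the SR image
;         mtsrin(sr, e.esid);                              // load that segment register
;         loaded |= bit_for_segment(e.esid);
;     }
;     stale = previously_valid & ~loaded;                  // valid before, not reloaded now
;     for (each segment s set in stale)
;         mtsrin(invalSpace_sr, segment_base(s));          // point it at the invalid VSID
;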
5545
5546 .align 5
5547
5548ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5549 cmplwi r12,pmapSegCacheUse ; See if we are done
5550 slwi r14,r12,4 ; Index to the cache slot
5551 lis r0,0x8000 ; Get set for a mask
5552 add r14,r14,r9 ; Point to the entry
5553
5554 bge- ssg32Done ; All done...
5555
5556 lwz r5,sgcESID+4(r14) ; Get the ESID part
5557 srw r2,r0,r12 ; Form a mask for the one we are loading
5558 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5559
5560 andc r11,r11,r2 ; Clear the bit
5561 lwz r6,sgcVSID(r14) ; And get the VSID top
5562
5563 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
5564
5565 xor r7,r7,r30 ; Modify the key before we actually set it
5566 srw r0,r0,r2 ; Get a mask for the SR we are loading
5567 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5568 or r17,r17,r0 ; Remember the segment
5569 rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID
5570 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5571
5572 mtsrin r8,r5 ; Load the segment
5573 b ssg32Enter ; Go enter the next...
5574
5575 .align 5
5576
5577ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5578 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5579
5580 lis r0,0x8000 ; Get set for a mask
5581 li r2,invalSpace ; Set the invalid address space VSID
5582
5583 nop ; Align loop
5584 nop ; Align loop
5585 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5586 nop ; Align loop
5587
5588ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5589 cmplwi r18,16 ; Have we finished?
5590 srw r22,r0,r18 ; Get the mask bit
5591 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5592 andc r16,r16,r22 ; Get rid of the guy we just did
5593 bge ssg32Really ; Yes, we are really done now...
5594
5595 mtsrin r2,r23 ; Invalidate the SR
5596 b ssg32Inval ; Do the next...
5597
5598 .align 5
5599
5600ssg32Really:
5601 stw r17,validSegs(r29) ; Set the valid SR flags
5602 li r3,1 ; Set kernel/user transition
5603 blr
5604
5605;
5606; This is the 64-bit address space switch code.
5607; First we blow away all of the SLB entries.
5608; Then we walk through the segment cache, loading the SLB.
5609; Afterwards, we release the cache lock.
5610;
5611; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5612; It's a performance thing...
5613;
5614
5615 .align 5
5616
5617ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5618 cmplwi r12,pmapSegCacheUse ; See if we are done
5619 slwi r14,r12,4 ; Index to the cache slot
5620 srw r16,r0,r12 ; Form a mask for the one we are loading
5621 add r14,r14,r9 ; Point to the entry
5622 andc r11,r11,r16 ; Clear the bit
5623 bge-- ssg64Done ; All done...
5624
5625 ld r5,sgcESID(r14) ; Get the ESID part
5626 ld r6,sgcVSID(r14) ; And get the VSID part
5627 oris r5,r5,0x0800 ; Turn on the valid bit
5628 or r5,r5,r17 ; Insert the SLB slot
5629 xor r6,r6,r30 ; Modify the key before we actually set it
5630 addi r17,r17,1 ; Bump to the next slot
5631 slbmte r6,r5 ; Make that SLB entry
5632 b ssg64Enter ; Go enter the next...
5633
5634 .align 5
5635
5636ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5637
5638 eqv r16,r16,r16 ; Load up with all foxes
5639 subfic r17,r17,64 ; Get the number of 1 bits we need
5640
5641 sld r16,r16,r17 ; Get a mask for the used SLB entries
5642 li r3,1 ; Set kernel/user transition
5643 std r16,validSegs(r29) ; Set the valid SR flags
5644 blr
5645
5646;
5647; mapSetUp - this function sets initial state for all mapping functions.
5648; We turn off all translations (physical), disable interruptions, and
5649; enter 64-bit mode if applicable.
5650;
5651; We also return the original MSR in r11, the feature flags in R12,
5652; and CR6 set up so we can do easy branches for 64-bit
5653; hw_clear_maps assumes r10, r9 will not be trashed.
5654;
5655
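;
; In effect (illustrative C; MSR bit names as in the PowerPC architecture):
;
;     saved_msr = mfmsr();                                   // handed back to the caller in r11
;     new_msr   = saved_msr & ~(MSR_VEC | MSR_FP | MSR_EE | MSR_DR | MSR_IR);
;     if (is_64bit_cpu) new_msr |= MSR_SF;                   // run the mapping code in 64-bit mode
;     mtmsr(new_msr); isync();                               // physical and uninterruptible from here
;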
5656 .align 5
5657 .globl EXT(mapSetUp)
5658
5659LEXT(mapSetUp)
5660
5661 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5662 mfsprg r12,2 ; Get feature flags
5663 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5664 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5665 mfmsr r11 ; Save the MSR
5666 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5667 andc r11,r11,r0 ; Clear VEC and FP for good
5668 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5669 li r2,1 ; Prepare for 64 bit
5670 andc r0,r11,r0 ; Clear the rest
5671 bt pfNoMSRirb,msuNoMSR ; No MSR...
5672 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5673
5674 mtmsr r0 ; Translation and all off
5675 isync ; Toss prefetch
5676 blr ; Return...
5677
5678 .align 5
5679
5680msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5681 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5682 isync ; synchronize
5683 blr ; Return...
5684
5685 .align 5
5686
5687msuNoMSR: mr r2,r3 ; Save R3 across call
5688 mr r3,r0 ; Get the new MSR value
5689 li r0,loadMSR ; Get the MSR setter SC
5690 sc ; Set it
5691 mr r3,r2 ; Restore R3
5692 blr ; Go back all set up...
5693
5694
5695;
5696; Guest shadow assist -- remove all guest mappings
5697;
5698; Remove all mappings for a guest pmap from the shadow hash table.
5699;
5700; Parameters:
5701; r3 : address of pmap, 32-bit kernel virtual address
5702;
5703; Non-volatile register usage:
5704; r24 : host pmap's physical address
5705; r25 : VMM extension block's physical address
5706; r26 : physent address
5707; r27 : guest pmap's space ID number
5708; r28 : current hash table page index
5709; r29 : guest pmap's physical address
5710; r30 : saved msr image
5711; r31 : current mapping
5712;
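;
; In outline (illustrative pseudocode; the constants and helper names are stand-ins):
;
;     lock_exclusive(&host_pmap->pmapSXlk);              // serialize with other mapping work
;     for (page = 0; page < hash_table_pages; page++) {  // every shadow hash table page
;         slot = first_slot_of(hash_page_index[page]);
;         for (i = 0; i < slots_per_page; i++, slot++) {
;             if ((slot->flags & mpgFree) || slot->space != guest_space)
;                 continue;                              // not one of this guest's mappings
;             if (!(slot->flags & mpgDormant))
;                 invalidate_pte(slot);                  // disconnect any live PTE first
;             release_physent_link(slot);                // drop it from the physical chain
;             slot->flags |= mpgFree;                    // hand the slot back
;         }
;     }
;     unlock(&host_pmap->pmapSXlk);
;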
5713 .align 5
5714 .globl EXT(hw_rem_all_gv)
5715
5716LEXT(hw_rem_all_gv)
5717
5718#define graStackSize ((31-24+1)*4)+4
5719 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5720 ; Mint a new stack frame
5721 mflr r0 ; Get caller's return address
5722 mfsprg r11,2 ; Get feature flags
5723 mtcrf 0x02,r11 ; Insert feature flags into cr6
5724 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5725 ; Save caller's return address
5726 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5727 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5728 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5729 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5730 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5731 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5732 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5733 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5734
5735 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5736
5737 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5738 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5739 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5740 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5741 b graStart ; Get to it
5742gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5743 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5744 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5745graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5746 xor r29,r3,r9 ; Convert pmap_t virt->real
5747 mr r30,r11 ; Save caller's msr image
5748
5749 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5750 bl sxlkExclusive ; Get lock exclusive
5751
5752 lwz r3,vxsGra(r25) ; Get remove all count
5753 addi r3,r3,1 ; Increment remove all count
5754 stw r3,vxsGra(r25) ; Update remove all count
5755
5756 li r28,0 ; r28 <- first hash page table index to search
5757 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5758graPgLoop:
5759 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5760 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5761 ; Convert page index into page physical index offset
5762 add r31,r31,r11 ; Calculate page physical index entry address
5763 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5764 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5765 b graLoop ; Examine all slots in this page
5766gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5767 b graLoop ; Examine all slots in this page
5768
5769 .align 5
5770graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5771 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5772 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5773 xor r4,r4,r27 ; Compare space ID number
5774 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5775 bne graMiss ; Not one of ours, skip it
5776
5777 lwz r11,vxsGraHits(r25) ; Get remove hit count
5778 addi r11,r11,1 ; Increment remove hit count
5779 stw r11,vxsGraHits(r25) ; Update remove hit count
5780
5781 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5782 bne graRemPhys ; Yes, nothing to disconnect
5783
5784 lwz r11,vxsGraActive(r25) ; Get remove active count
5785 addi r11,r11,1 ; Increment remove active count
5786 stw r11,vxsGraActive(r25) ; Update remove active count
5787
5788 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5789 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5790 ; r31 <- mapping's physical address
5791 ; r3 -> PTE slot physical address
5792 ; r4 -> High-order 32 bits of PTE
5793 ; r5 -> Low-order 32 bits of PTE
5794 ; r6 -> PCA
5795 ; r7 -> PCA physical address
5796 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5797 b graFreePTE ; Join 64-bit path to release the PTE
5798graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5799 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5800graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5801 beq- graRemPhys ; No valid PTE, we're almost done
5802 lis r0,0x8000 ; Prepare free bit for this slot
5803 srw r0,r0,r2 ; Position free bit
5804 or r6,r6,r0 ; Set it in our PCA image
5805 lwz r8,mpPte(r31) ; Get PTE pointer
5806 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5807 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5808 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5809 stw r6,0(r7) ; Update PCA and unlock the PTEG
5810
5811graRemPhys:
5812 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5813 bl mapFindLockPN ; Find 'n' lock this page's physent
5814 mr. r26,r3 ; Got lock on our physent?
5815 beq-- graBadPLock ; No, time to bail out
5816
5817 crset cr1_eq ; cr1_eq <- previous link is the anchor
5818 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5819 la r11,ppLink+4(r26) ; Point to chain anchor
5820 lwz r9,ppLink+4(r26) ; Get chain anchor
5821 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5822
5823graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5824 cmplw r9,r31 ; Is this the mapping to remove?
5825 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5826 bne graRemNext ; No, chain onward
5827 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5828 stw r8,0(r11) ; Unchain gpv->phys mapping
5829 b graRemoved ; Exit loop
5830graRemRetry:
5831 lwarx r0,0,r11 ; Get previous link
5832 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5833 stwcx. r0,0,r11 ; Update previous link
5834 bne- graRemRetry ; Lost reservation, retry
5835 b graRemoved ; Good work, let's get outta here
5836
5837graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5838 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5839 mr. r9,r8 ; Does next entry exist?
5840 b graRemLoop ; Carry on
5841
5842graRemove64:
5843 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5844 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5845 la r11,ppLink(r26) ; Point to chain anchor
5846 ld r9,ppLink(r26) ; Get chain anchor
5847 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5848graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5849 cmpld r9,r31 ; Is this the mapping to remove?
5850 ld r8,mpAlias(r9) ; Get forward chain pointer
5851 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5852 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5853 std r8,0(r11) ; Unchain gpv->phys mapping
5854 b graRemoved ; Exit loop
5855graRem64Rt: ldarx r0,0,r11 ; Get previous link
5856 and r0,r0,r7 ; Get flags
5857 or r0,r0,r8 ; Insert new forward pointer
5858 stdcx. r0,0,r11 ; Slam it back in
5859 bne-- graRem64Rt ; Lost reservation, retry
5860 b graRemoved ; Good work, let's go home
5861
5862graRem64Nxt:
5863 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5864 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5865 mr. r9,r8 ; Does next entry exist?
5866 b graRem64Lp ; Carry on
5867
5868graRemoved:
5869 mr r3,r26 ; r3 <- physent's address
5870 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5871
5872 lwz r3,mpFlags(r31) ; Get mapping's flags
5873 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5874 ori r3,r3,mpgFree ; Mark mapping free
5875 stw r3,mpFlags(r31) ; Update flags
5876
5877graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5878 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5879 bne graLoop ; No, examine next slot
5880 addi r28,r28,1 ; Increment hash table page index
5881 cmplwi r28,GV_HPAGES ; End of hash table?
5882 bne graPgLoop ; Examine next hash table page
5883
5884 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5885 bl sxlkUnlock ; Release host pmap's search lock
5886
5887 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5888 mtmsr r30 ; Restore 'rupts, translation
5889 isync ; Throw a small wrench into the pipeline
5890 b graPopFrame ; Nothing to do now but pop a frame and return
5891graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5892graPopFrame:
5893 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5894 ; Get caller's return address
5895 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5896 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5897 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5898 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5899 mtlr r0 ; Prepare return address
5900 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5901 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5902 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5903 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5904 lwz r1,0(r1) ; Pop stack frame
5905 blr ; Return to caller
5906
5907graBadPLock:
5908graRemoveMiss:
5909 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5910 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5911 li r3,failMapping ; The BOMB, Dmitri.
5912 sc ; The hydrogen bomb.
5913
5914
5915;
5916; Guest shadow assist -- remove local guest mappings
5917;
5918; Remove local mappings for a guest pmap from the shadow hash table.
5919;
5920; Parameters:
5921; r3 : address of guest pmap, 32-bit kernel virtual address
5922;
5923; Non-volatile register usage:
5924; r20 : current active map word's physical address
5925; r21 : current hash table page address
5926; r22 : updated active map word in process
5927; r23 : active map word in process
5928; r24 : host pmap's physical address
5929; r25 : VMM extension block's physical address
5930; r26 : physent address
5931; r27 : guest pmap's space ID number
5932; r28 : current active map index
5933; r29 : guest pmap's physical address
5934; r30 : saved msr image
5935; r31 : current mapping
5936;
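;
;		Illustrative sketch only, not part of the original source: how the active-map bit
;		words drive this scan, in rough C.  GV_MAP_WORDS, the flag constants, and the
;		helpers slot_for() and invalidate_pte() are assumptions for illustration.
;
;			for (int w = 0; w < GV_MAP_WORDS; w++) {
;				unsigned int word = active_map[w];               /* one bit per in-use slot       */
;				while (word != 0) {
;					int bit = __builtin_clz(word);               /* next lit bit (cntlzw)         */
;					word &= ~(0x80000000u >> bit);               /* consume it in the working copy */
;					struct gv_slot *m = slot_for(w, bit);        /* band from word, slot from bit */
;					if ((m->flags & (MPG_FREE | MPG_GLOBAL)) || m->space != guest_space)
;						continue;                                /* free, global, or not ours     */
;					active_map[w] &= ~(0x80000000u >> bit);      /* mapping is no longer active   */
;					m->flags |= MPG_DORMANT;                     /* park it                       */
;					invalidate_pte(m);                           /* drop the hardware PTE         */
;				}
;			}
;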
5937 .align 5
5938 .globl EXT(hw_rem_local_gv)
5939
5940LEXT(hw_rem_local_gv)
5941
5942#define grlStackSize ((31-20+1)*4)+4
5943 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5944 ; Mint a new stack frame
5945 mflr r0 ; Get caller's return address
5946 mfsprg r11,2 ; Get feature flags
5947 mtcrf 0x02,r11 ; Insert feature flags into cr6
5948 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5949 ; Save caller's return address
5950 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5951 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5952 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5953 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5954 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5955 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5956 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5957 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5958 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5959 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5960 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5961 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5962
5963 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5964
5965 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5966 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5967 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5968 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5969 b grlStart ; Get to it
5970grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5971 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5972 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5973
5974grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5975 xor r29,r3,r9 ; Convert pmap_t virt->real
5976 mr r30,r11 ; Save caller's msr image
5977
5978 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5979 bl sxlkExclusive ; Get lock exclusive
5980
5981 li r28,0 ; r28 <- index of first active map word to search
5982 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5983 b grlMap1st ; Examine first map word
5984
5985 .align 5
5986grlNextMap: stw r22,0(r21) ; Save updated map word
5987 addi r28,r28,1 ; Increment map word index
5988 cmplwi r28,GV_MAP_WORDS ; See if we're done
5989 beq grlDone ; Yup, let's get outta here
5990
5991grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
5992 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
5993 ; Convert map index into map index offset
5994 add r20,r20,r11 ; Calculate map array element address
5995 lwz r22,0(r20) ; Get active map word at index
5996 mr. r23,r22 ; Any active mappings indicated?
5997 beq grlNextMap ; Nope, check next word
5998
5999 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6000 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6001 ; Extract page index from map word index and convert
6002 ; into page physical index offset
6003 add r21,r21,r11 ; Calculate page physical index entry address
6004 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6005 lwz r21,4(r21) ; Get selected hash table page's address
6006 b grlLoop ; Examine all slots in this page
6007grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6008 b grlLoop ; Examine all slots in this page
6009
6010 .align 5
6011grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6012 cmplwi r11,32 ; Any active mappings left in this word?
6013 lis r12,0x8000 ; Prepare mask to reset bit
6014 srw r12,r12,r11 ; Position mask bit
6015 andc r23,r23,r12 ; Reset lit bit
6016 beq grlNextMap ; No bits lit, examine next map word
6017
6018 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6019 rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6020 ; Extract slot band number from index and insert
6021 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6022
6023 lwz r3,mpFlags(r31) ; Get mapping's flags
6024 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6025 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6026 xor r4,r4,r27 ; Compare space ID number
6027 or. r4,r4,r5 ; (space id miss || global)
6028 bne grlLoop ; Not one of ours, skip it
6029 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6030 ori r3,r3,mpgDormant ; Mark entry dormant
6031 stw r3,mpFlags(r31) ; Update mapping's flags
6032
6033 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6034 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6035 ; r31 <- mapping's physical address
6036 ; r3 -> PTE slot physical address
6037 ; r4 -> High-order 32 bits of PTE
6038 ; r5 -> Low-order 32 bits of PTE
6039 ; r6 -> PCA
6040 ; r7 -> PCA physical address
6041 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6042 b grlFreePTE ; Join 64-bit path to release the PTE
6043grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6044 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6045grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6046 beq- grlLoop ; No valid PTE, we're done with this mapping
6047 lis r0,0x8000 ; Prepare free bit for this slot
6048 srw r0,r0,r2 ; Position free bit
6049 or r6,r6,r0 ; Set it in our PCA image
6050 lwz r8,mpPte(r31) ; Get PTE pointer
6051 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6052 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6053 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6054 stw r6,0(r7) ; Update PCA and unlock the PTEG
6055 b grlLoop ; On to next active mapping in this map word
6056
6057grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6058 bl sxlkUnlock ; Release host pmap's search lock
6059
6060 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6061 mtmsr r30 ; Restore 'rupts, translation
6062 isync ; Throw a small wrench into the pipeline
6063 b grlPopFrame ; Nothing to do now but pop a frame and return
6064grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6065grlPopFrame:
6066 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6067 ; Get caller's return address
6068 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6069 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6070 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6071 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6072 mtlr r0 ; Prepare return address
6073 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6074 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6075 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6076 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6077 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6078 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6079 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6080 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6081 lwz r1,0(r1) ; Pop stack frame
6082 blr ; Return to caller
6083
6084
6085;
6086; Guest shadow assist -- resume a guest mapping
6087;
6088; Locates the specified dormant mapping, and if it exists validates it and makes it
6089; active.
6090;
6091; Parameters:
6092; r3 : address of host pmap, 32-bit kernel virtual address
6093; r4 : address of guest pmap, 32-bit kernel virtual address
6094; r5 : host virtual address, high-order 32 bits
6095; r6 : host virtual address, low-order 32 bits
6096; r7 : guest virtual address, high-order 32 bits
6097; r8 : guest virtual address, low-order 32 bits
6098; r9 : guest mapping protection code
6099;
6100; Non-volatile register usage:
6101; r23 : VMM extension block's physical address
6102; r24 : physent physical address
6103; r25 : caller's msr image from mapSetUp
6104; r26 : guest mapping protection code
6105; r27 : host pmap physical address
6106; r28 : guest pmap physical address
6107; r29 : host virtual address
6108; r30 : guest virtual address
6109; r31 : gva->phys mapping's physical address
6110;
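;
;		Illustrative sketch only, not part of the original source: how the shadow hash group
;		searched below is located.  The modulo formulation and the names GV_GROUPS_PER_PAGE,
;		hash_page, and gv_slot are assumptions; the assembly uses shift/mask constants
;		(GV_HPAGE_*, GV_HGRP_*) to the same effect.
;
;			unsigned int hash  = guest_space ^ (unsigned int)(gva >> 12);  /* spaceID ^ page number */
;			unsigned int page  = (hash / GV_GROUPS_PER_PAGE) % GV_HPAGES;  /* shadow hash page      */
;			unsigned int group = hash % GV_GROUPS_PER_PAGE;                /* group within the page */
;			struct gv_slot *slots = hash_page[page] + group * GV_SLOTS;    /* GV_SLOTS candidates   */
;			/* the slot search then looks for: !free && space match && vaddr match */
;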
6111 .align 5
6112 .globl EXT(hw_res_map_gv)
6113
6114LEXT(hw_res_map_gv)
6115
6116#define grsStackSize ((31-23+1)*4)+4
6117
6118 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6119 ; Mint a new stack frame
6120 mflr r0 ; Get caller's return address
6121 mfsprg r11,2 ; Get feature flags
6122 mtcrf 0x02,r11 ; Insert feature flags into cr6
6123 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6124 ; Save caller's return address
6125 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6126 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6127 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6128 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6129 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6130 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6131 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6132 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6133 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6134
6135 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6136 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6137 mr r26,r9 ; Copy guest mapping protection code
6138
6139 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6140 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6141 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6142 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6143 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6144 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6145 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6146 srwi r11,r30,12 ; Form shadow hash:
6147 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6148 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6149 ; Form index offset from hash page number
6150 add r31,r31,r10 ; r31 <- hash page index entry
6151 lwz r31,4(r31) ; r31 <- hash page paddr
6152 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6153 ; r31 <- hash group paddr
6154 b grsStart ; Get to it
6155
6156grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6157 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6158 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6159 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6160 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6161 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6162 srwi r11,r30,12 ; Form shadow hash:
6163 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6164 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6165 ; Form index offset from hash page number
6166 add r31,r31,r10 ; r31 <- hash page index entry
6167 ld r31,0(r31) ; r31 <- hash page paddr
6168 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6169 ; r31 <- hash group paddr
6170
6171grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6172 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6173 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6174 mr r25,r11 ; Save caller's msr image
6175
6176 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6177 bl sxlkExclusive ; Get lock exclusive
6178
6179 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6180 mtctr r0 ; in this group
6181 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6182
6183 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6184 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6185 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6186 b grs32SrchLp ; Let the search begin!
6187
6188 .align 5
6189grs32SrchLp:
6190 mr r6,r3 ; r6 <- current mapping slot's flags
6191 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6192 mr r7,r4 ; r7 <- current mapping slot's space ID
6193 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6194 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6195 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6196 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6197 xor r7,r7,r9 ; Compare space ID
6198 or r0,r11,r7 ; r0 <- !(!free && space match)
6199 xor r8,r8,r30 ; Compare virtual address
6200 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6201 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6202
6203 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6204 bdnz grs32SrchLp ; Iterate
6205
6206 mr r6,r3 ; r6 <- current mapping slot's flags
6207 clrrwi r5,r5,12 ; Remove flags from virtual address
6208 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6209 xor r4,r4,r9 ; Compare space ID
6210 or r0,r11,r4 ; r0 <- !(!free && space match)
6211 xor r5,r5,r30 ; Compare virtual address
6212 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6213 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6214 b grsSrchMiss ; No joy in our hash group
6215
6216grs64Search:
6217 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6218 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6219 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6220 b grs64SrchLp ; Let the search begin!
6221
6222 .align 5
6223grs64SrchLp:
6224 mr r6,r3 ; r6 <- current mapping slot's flags
6225 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6226 mr r7,r4 ; r7 <- current mapping slot's space ID
6227 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6228 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6229 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6230 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6231 xor r7,r7,r9 ; Compare space ID
6232 or r0,r11,r7 ; r0 <- !(!free && space match)
6233 xor r8,r8,r30 ; Compare virtual address
6234 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6235 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6236
6237 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6238 bdnz grs64SrchLp ; Iterate
6239
6240 mr r6,r3 ; r6 <- current mapping slot's flags
6241 clrrdi r5,r5,12 ; Remove flags from virtual address
6242 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6243 xor r4,r4,r9 ; Compare space ID
6244 or r0,r11,r4 ; r0 <- !(!free && space match)
6245 xor r5,r5,r30 ; Compare virtual address
6246 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6247 bne grsSrchMiss ; No joy in our hash group
6248
6249grsSrchHit:
6250 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6251 bne grsFindHost ; Yes, nothing to disconnect
6252
6253 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6254 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6255 ; r31 <- mapping's physical address
6256 ; r3 -> PTE slot physical address
6257 ; r4 -> High-order 32 bits of PTE
6258 ; r5 -> Low-order 32 bits of PTE
6259 ; r6 -> PCA
6260 ; r7 -> PCA physical address
6261 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6262 b grsFreePTE ; Join 64-bit path to release the PTE
6263grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6264 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6265grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6266 beq- grsFindHost ; No valid PTE, we're almost done
6267 lis r0,0x8000 ; Prepare free bit for this slot
6268 srw r0,r0,r2 ; Position free bit
6269 or r6,r6,r0 ; Set it in our PCA image
6270 lwz r8,mpPte(r31) ; Get PTE pointer
6271 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6272 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6273 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6274 stw r6,0(r7) ; Update PCA and unlock the PTEG
6275
6276grsFindHost:
6277
6278// We now have a dormant guest mapping that matches our space id and virtual address. Our next
6279// step is to locate the host mapping that completes the guest mapping's connection to a physical
6280// frame. The guest and host mappings must connect to the same physical frame, so they must both
6281// be chained on the same physent. We search the physent chain for a host mapping matching our
6282// host's space id and the host virtual address. If we succeed, we know that the entire chain
6283// of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6284// resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6285// host virtual or physical address has changed since the guest mapping was suspended, so it
6286// is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
6287// our caller that it will have to take its long path, translating the host virtual address
6288// through the host's skiplist and installing a new guest mapping.
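//
// Illustrative sketch only, not part of the original source, of that physent alias-chain
// search in rough C.  Field and constant names below are assumptions for illustration.
//
//		struct mapping *m;
//		for (m = physent->first; m != NULL; m = m->alias_next) {
//			if ((m->flags & MP_TYPE_MASK) == MP_NORMAL &&        /* only normal host mappings      */
//			    m->space == host_space &&                        /* host address space matches     */
//			    (m->vaddr & ~HW_FLAGS) == host_va)               /* host virtual address matches   */
//				break;                                           /* chain is intact: resume        */
//		}
//		if (m == NULL)
//			delete_guest_mapping();                              /* host side changed: report miss */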
6289
6290 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6291 bl mapFindLockPN ; Find 'n' lock this page's physent
6292 mr. r24,r3 ; Got lock on our physent?
6293 beq-- grsBadPLock ; No, time to bail out
6294
6295 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6296
6297 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6298 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6299 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6300grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6301 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6302 lwz r7,mpFlags(r12) ; Get mapping's flags
6303 lhz r4,mpSpace(r12) ; Get mapping's space id number
6304 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6305 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6306
6307 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6308 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6309 xori r0,r0,mpNormal ; Normal mapping?
6310 xor r4,r4,r6 ; Compare w/ host space id number
6311 xor r5,r5,r29 ; Compare w/ host virtual address
6312 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6313 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6314 beq grsPEHit ; Hit
6315 b grsPELoop ; Iterate
6316
6317grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6318 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6319 ld r9,ppLink(r24) ; Get first mapping on physent
6320 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6321 andc r9,r9,r0 ; Cleanup mapping pointer
6322grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6323 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6324 lwz r7,mpFlags(r12) ; Get mapping's flags
6325 lhz r4,mpSpace(r12) ; Get mapping's space id number
6326 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6327 ld r9,mpAlias(r12) ; Next mapping physent alias chain
6328 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6329 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6330 xori r0,r0,mpNormal ; Normal mapping?
6331 xor r4,r4,r6 ; Compare w/ host space id number
6332 xor r5,r5,r29 ; Compare w/ host virtual address
6333 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6334 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6335 beq grsPEHit ; Hit
6336 b grsPELp64 ; Iterate
6337
6338grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6339 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6340 stw r0,mpVAddr+4(r31) ; Write 'em back
6341
6342 eieio ; Ensure previous mapping updates are visible
6343 lwz r0,mpFlags(r31) ; Get flags
6344 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6345 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6346
6347 li r31,mapRtOK ; Indicate success
6348 b grsRelPhy ; Exit through physent lock release
6349
6350grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6351 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6352 la r11,ppLink+4(r24) ; Point to chain anchor
6353 lwz r9,ppLink+4(r24) ; Get chain anchor
6354 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6355grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6356 cmplw r9,r31 ; Is this the mapping to remove?
6357 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6358 bne grsRemNext ; No, chain onward
6359 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6360 stw r8,0(r11) ; Unchain gpv->phys mapping
6361 b grsDelete ; Finish deleting mapping
6362grsRemRetry:
6363 lwarx r0,0,r11 ; Get previous link
6364 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6365 stwcx. r0,0,r11 ; Update previous link
6366 bne- grsRemRetry ; Lost reservation, retry
6367 b grsDelete ; Finish deleting mapping
6368
6369 .align 5
6370grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6371 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6372 mr. r9,r8 ; Does next entry exist?
6373 b grsRemLoop ; Carry on
6374
6375grsRemove64:
6376 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6377 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6378 la r11,ppLink(r24) ; Point to chain anchor
6379 ld r9,ppLink(r24) ; Get chain anchor
6380 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6381grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6382 cmpld r9,r31 ; Is this the mapping to remove?
6383 ld r8,mpAlias(r9) ; Get forward chain pointer
6384 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6385 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6386 std r8,0(r11) ; Unchain gpv->phys mapping
6387 b grsDelete ; Finish deleting mapping
6388grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6389 and r0,r0,r7 ; Get flags
6390 or r0,r0,r8 ; Insert new forward pointer
6391 stdcx. r0,0,r11 ; Slam it back in
6392 bne-- grsRem64Rt ; Lost reservation, retry
6393 b grsDelete ; Finish deleting mapping
6394
6395 .align 5
6396grsRem64Nxt:
6397 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6398 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6399 mr. r9,r8 ; Does next entry exist?
6400 b grsRem64Lp ; Carry on
6401
6402grsDelete:
6403 lwz r3,mpFlags(r31) ; Get mapping's flags
6404 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6405 ori r3,r3,mpgFree ; Mark mapping free
6406 stw r3,mpFlags(r31) ; Update flags
6407
6408 li r31,mapRtNotFnd ; Didn't succeed
6409
6410grsRelPhy: mr r3,r24 ; r3 <- physent addr
6411 bl mapPhysUnlock ; Unlock physent chain
6412
6413grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6414 bl sxlkUnlock ; Release host pmap search lock
6415
6416grsRtn: mr r3,r31 ; r3 <- result code
6417 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6418 mtmsr r25 ; Restore 'rupts, translation
6419 isync ; Throw a small wrench into the pipeline
6420 b grsPopFrame ; Nothing to do now but pop a frame and return
6421grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6422grsPopFrame:
6423 lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6424 ; Get caller's return address
6425 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6426 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6427 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6428 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6429 mtlr r0 ; Prepare return address
6430 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6431 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6432 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6433 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6434 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6435 lwz r1,0(r1) ; Pop stack frame
6436 blr ; Return to caller
6437
6438 .align 5
6439grsSrchMiss:
6440 li r31,mapRtNotFnd ; Could not locate requested mapping
6441 b grsRelPmap ; Exit through host pmap search lock release
6442
6443grsBadPLock:
6444grsPEMissMiss:
6445 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6446 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6447 li r3,failMapping ; The BOMB, Dmitri.
6448 sc ; The hydrogen bomb.
6449
6450
6451;
6452; Guest shadow assist -- add a guest mapping
6453;
6454; Adds a guest mapping.
6455;
6456; Parameters:
6457; r3 : address of host pmap, 32-bit kernel virtual address
6458; r4 : address of guest pmap, 32-bit kernel virtual address
6459; r5 : guest virtual address, high-order 32 bits
6460; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6461; r7 : new mapping's flags
6462; r8 : physical address, 32-bit page number
6463;
6464; Non-volatile register usage:
6465; r22 : hash group's physical address
6466; r23 : VMM extension block's physical address
6467; r24 : mapping's flags
6468; r25 : caller's msr image from mapSetUp
6469; r26 : physent physical address
6470; r27 : host pmap physical address
6471; r28 : guest pmap physical address
6472; r29 : physical address, 32-bit 4k-page number
6473; r30 : guest virtual address
6474; r31 : gva->phys mapping's physical address
6475;
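;
;		Illustrative sketch only, not part of the original source: the slot-selection policy
;		used below when no matching entry already exists in the hash group.  The gv_group
;		structure and the names are assumptions for illustration.
;
;			struct gv_group { unsigned char cursor; struct gv_slot slot[GV_SLOTS]; };
;
;			static struct gv_slot *pick_slot_sketch(struct gv_group *g) {
;				struct gv_slot *dormant = NULL;
;				for (int i = 0; i < GV_SLOTS; i++) {
;					struct gv_slot *m = &g->slot[(g->cursor + i) % GV_SLOTS];
;					if (m->flags & MPG_FREE)
;						return m;                            /* best case: a free slot          */
;					if ((m->flags & MPG_DORMANT) && dormant == NULL)
;						dormant = m;                         /* remember the first dormant slot */
;				}
;				if (dormant != NULL)
;					return dormant;                          /* next best: reuse a dormant slot */
;				return &g->slot[g->cursor];                  /* otherwise steal the slot at the cursor */
;			}
;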
6476
6477 .align 5
6478 .globl EXT(hw_add_map_gv)
6479
6480
6481LEXT(hw_add_map_gv)
6482
6483#define gadStackSize ((31-22+1)*4)+4
6484
6485 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6486 ; Mint a new stack frame
6487 mflr r0 ; Get caller's return address
6488 mfsprg r11,2 ; Get feature flags
6489 mtcrf 0x02,r11 ; Insert feature flags into cr6
6490 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6491 ; Save caller's return address
6492 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6493 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6494 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6495 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6496 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6497 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6498 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6499 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6500 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6501 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6502
6503 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6504 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6505 mr r24,r7 ; Copy guest mapping's flags
6506 mr r29,r8 ; Copy target frame's physical address
6507
6508 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6509 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6510 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6511 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6512 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6513 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6514 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6515 srwi r11,r30,12 ; Form shadow hash:
6516 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6517 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6518 ; Form index offset from hash page number
6519 add r22,r22,r10 ; r22 <- hash page index entry
6520 lwz r22,4(r22) ; r22 <- hash page paddr
6521 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6522 ; r22 <- hash group paddr
6523 b gadStart ; Get to it
6524
6525gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6526 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6527 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6528 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6529 srwi r11,r30,12 ; Form shadow hash:
6530 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6531 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6532 ; Form index offset from hash page number
6533 add r22,r22,r10 ; r22 <- hash page index entry
6534 ld r22,0(r22) ; r22 <- hash page paddr
6535 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6536 ; r22 <- hash group paddr
6537
6538gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6539 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6540 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6541 mr r25,r11 ; Save caller's msr image
6542
6543 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6544 bl sxlkExclusive ; Get lock exclusive
6545
6546 mr r31,r22 ; Prepare to search this group
6547 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6548 mtctr r0 ; in this group
6549 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6550
6551 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6552 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6553 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6554 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6555 b gad32SrchLp ; Let the search begin!
6556
6557 .align 5
6558gad32SrchLp:
6559 mr r6,r3 ; r6 <- current mapping slot's flags
6560 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6561 mr r7,r4 ; r7 <- current mapping slot's space ID
6562 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6563 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6564 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6565 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6566 xor r7,r7,r9 ; Compare space ID
6567 or r0,r11,r7 ; r0 <- !(!free && space match)
6568 xor r8,r8,r12 ; Compare virtual address
6569 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6570 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6571
6572 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6573 bdnz gad32SrchLp ; Iterate
6574
6575 mr r6,r3 ; r6 <- current mapping slot's flags
6576 clrrwi r5,r5,12 ; Remove flags from virtual address
6577 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6578 xor r4,r4,r9 ; Compare space ID
6579 or r0,r11,r4 ; r0 <- !(!free && space match)
6580 xor r5,r5,r12 ; Compare virtual address
6581 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6582 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6583 b gadScan ; No joy in our hash group
6584
6585gad64Search:
6586 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6587 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6588 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6589 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6590 b gad64SrchLp ; Let the search begin!
6591
6592 .align 5
6593gad64SrchLp:
6594 mr r6,r3 ; r6 <- current mapping slot's flags
6595 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6596 mr r7,r4 ; r7 <- current mapping slot's space ID
6597 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6598 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6599 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6600 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6601 xor r7,r7,r9 ; Compare space ID
6602 or r0,r11,r7 ; r0 <- !(!free && space match)
6603 xor r8,r8,r12 ; Compare virtual address
6604 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6605 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6606
6607 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6608 bdnz gad64SrchLp ; Iterate
6609
6610 mr r6,r3 ; r6 <- current mapping slot's flags
6611 clrrdi r5,r5,12 ; Remove flags from virtual address
6612 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6613 xor r4,r4,r9 ; Compare space ID
6614 or r0,r11,r4 ; r0 <- !(!free && space match)
6615 xor r5,r5,r12 ; Compare virtual address
6616 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6617 bne gadScan ; No joy in our hash group
6618 b gadRelPmap ; Hit, let upper-level redrive sort it out
6619
6620gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6621 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6622 ; Prepare to address slot at cursor
6623 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6624 mtctr r0 ; in this group
6625 or r2,r22,r12 ; r2 <- 1st mapping to search
6626 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6627 li r11,0 ; No dormant entries found yet
6628 b gadScanLoop ; Let the search begin!
6629
6630 .align 5
6631gadScanLoop:
6632 addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6633 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6634 ; Trim off any carry, wrapping into slot number range
6635 mr r31,r2 ; r31 <- current mapping's address
6636 or r2,r22,r12 ; r2 <- next mapping to search
6637 mr r6,r3 ; r6 <- current mapping slot's flags
6638 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6639 rlwinm. r0,r6,0,mpgFree ; Test free flag
6640 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6641 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6642 xori r0,r0,mpgDormant ; Invert dormant flag
6643 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6644 bne gadNotDorm ; Not dormant or we've already seen one
6645 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6646gadNotDorm: bdnz gadScanLoop ; Iterate
6647
6648 mr r31,r2 ; r31 <- final mapping's address
6649 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6650 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6651 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6652 xori r0,r0,mpgDormant ; Invert dormant flag
6653 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6654 bne gadCkDormant ; Not dormant or we've already seen one
6655 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6656
6657gadCkDormant:
6658 mr. r31,r11 ; Get dormant mapping, if any, and test
6659 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6660
6661gadSteal:
6662 lbz r12,mpgCursor(r22) ; Get group's cursor
6663 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6664 ; Prepare to address slot at cursor
6665 or r31,r22,r12 ; r31 <- address of mapping to steal
6666
6667 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6668 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6669 ; r31 <- mapping's physical address
6670 ; r3 -> PTE slot physical address
6671 ; r4 -> High-order 32 bits of PTE
6672 ; r5 -> Low-order 32 bits of PTE
6673 ; r6 -> PCA
6674 ; r7 -> PCA physical address
6675 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6676 b gadFreePTE ; Join 64-bit path to release the PTE
6677gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6678 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6679gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6680 beq- gadUpCursor ; No valid PTE, we're almost done
6681 lis r0,0x8000 ; Prepare free bit for this slot
6682 srw r0,r0,r2 ; Position free bit
6683 or r6,r6,r0 ; Set it in our PCA image
6684 lwz r8,mpPte(r31) ; Get PTE pointer
6685 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6686 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6687 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6688 stw r6,0(r7) ; Update PCA and unlock the PTEG
6689
6690gadUpCursor:
6691 rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6692 ; Recover slot number from stolen mapping's address
6693 addi r12,r12,1 ; Increment slot number
6694 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6695 stb r12,mpgCursor(r22) ; Update group's cursor
6696
6697 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6698 bl mapFindLockPN ; Find 'n' lock this page's physent
6699 mr. r26,r3 ; Got lock on our physent?
6700 beq-- gadBadPLock ; No, time to bail out
6701
6702 crset cr1_eq ; cr1_eq <- previous link is the anchor
6703 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6704 la r11,ppLink+4(r26) ; Point to chain anchor
6705 lwz r9,ppLink+4(r26) ; Get chain anchor
6706 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6707gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6708 cmplw r9,r31 ; Is this the mapping to remove?
6709 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6710 bne gadRemNext ; No, chain onward
6711 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6712 stw r8,0(r11) ; Unchain gpv->phys mapping
6713 b gadDelDone ; Finish deleting mapping
6714gadRemRetry:
6715 lwarx r0,0,r11 ; Get previous link
6716 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6717 stwcx. r0,0,r11 ; Update previous link
6718 bne- gadRemRetry ; Lost reservation, retry
6719 b gadDelDone ; Finish deleting mapping
6720
6721gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6722 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6723 mr. r9,r8 ; Does next entry exist?
6724 b gadRemLoop ; Carry on
6725
6726gadRemove64:
6727 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6728 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6729 la r11,ppLink(r26) ; Point to chain anchor
6730 ld r9,ppLink(r26) ; Get chain anchor
6731 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6732gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6733 cmpld r9,r31 ; Is this the mapping to remove?
6734 ld r8,mpAlias(r9) ; Get forward chain pointer
6735 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6736 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6737 std r8,0(r11) ; Unchain gpv->phys mapping
6738 b gadDelDone ; Finish deleting mapping
6739gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6740 and r0,r0,r7 ; Get flags
6741 or r0,r0,r8 ; Insert new forward pointer
6742 stdcx. r0,0,r11 ; Slam it back in
6743 bne-- gadRem64Rt ; Lost reservation, retry
6744 b gadDelDone ; Finish deleting mapping
6745
6746 .align 5
6747gadRem64Nxt:
6748 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6749 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6750 mr. r9,r8 ; Does next entry exist?
6751 b gadRem64Lp ; Carry on
6752
6753gadDelDone:
6754 mr r3,r26 ; Get physent address
6755 bl mapPhysUnlock ; Unlock physent chain
6756
6757gadFillMap:
6758 lwz r12,pmapSpace(r28) ; Get guest space id number
6759 li r2,0 ; Get a zero
6760 stw r24,mpFlags(r31) ; Set mapping's flags
6761 sth r12,mpSpace(r31) ; Set mapping's space id number
6762 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6763 stw r29,mpPAddr(r31) ; Set mapping's physical address
6764 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6765 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6766 b gadChain ; Continue with chaining mapping to physent
6767gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6768
6769gadChain: mr r3,r29 ; r3 <- physical frame address
6770 bl mapFindLockPN ; Find 'n' lock this page's physent
6771 mr. r26,r3 ; Got lock on our physent?
6772 beq-- gadBadPLock ; No, time to bail out
6773
6774 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6775 lwz r12,ppLink+4(r26) ; Get forward chain
6776 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6777 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6778 stw r11,mpAlias+4(r31) ; New mapping will head chain
6779 stw r12,ppLink+4(r26) ; Point physent to new mapping
6780 b gadFinish ; All over now...
6781
6782gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6783 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6784 ld r12,ppLink(r26) ; Get forward chain
6785 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6786 and r12,r12,r7 ; Isolate pointer's flags
6787 or r12,r12,r31 ; Insert new mapping's address forming pointer
6788 std r11,mpAlias(r31) ; New mapping will head chain
6789 std r12,ppLink(r26) ; Point physent to new mapping
6790
6791gadFinish: eieio ; Ensure new mapping is completely visible
6792
6793gadRelPhy: mr r3,r26 ; r3 <- physent addr
6794 bl mapPhysUnlock ; Unlock physent chain
6795
6796gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6797 bl sxlkUnlock ; Release host pmap search lock
6798
6799 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6800 mtmsr r25 ; Restore 'rupts, translation
6801 isync ; Throw a small wrench into the pipeline
6802 b gadPopFrame ; Nothing to do now but pop a frame and return
6803gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6804gadPopFrame:
6805 lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6806 ; Get caller's return address
6807 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6808 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6809 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6810 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6811 mtlr r0 ; Prepare return address
6812 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6813 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6814 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6815 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6816 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6817 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6818 lwz r1,0(r1) ; Pop stack frame
6819 blr ; Return to caller
6820
6821gadPEMissMiss:
6822gadBadPLock:
6823 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6824 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6825 li r3,failMapping ; The BOMB, Dmitri.
6826 sc ; The hydrogen bomb.
6827
6828
6829;
6830; Guest shadow assist -- suspend a guest mapping
6831;
6832; Suspends a guest mapping.
6833;
6834; Parameters:
6835; r3 : address of host pmap, 32-bit kernel virtual address
6836; r4 : address of guest pmap, 32-bit kernel virtual address
6837; r5 : guest virtual address, high-order 32 bits
6838; r6 : guest virtual address, low-order 32 bits
6839;
6840; Non-volatile register usage:
6841; r26 : VMM extension block's physical address
6842; r27 : host pmap physical address
6843; r28 : guest pmap physical address
6844; r29 : caller's msr image from mapSetUp
6845; r30 : guest virtual address
6846; r31 : gva->phys mapping's physical address
6847;
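;
;		Illustrative sketch only, not part of the original source: the suspend operation in
;		rough C.  The helper and flag names are assumptions for illustration.
;
;			struct gv_slot *m = find_guest_mapping(group, guest_space, gva);  /* skips free/dormant slots */
;			if (m != NULL) {
;				invalidate_pte(m);            /* disconnect hardware PTE, gathering ref/change bits  */
;				m->flags |= MPG_DORMANT;      /* slot stays in the shadow table, marked dormant      */
;			}
;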
6848
6849 .align 5
6850 .globl EXT(hw_susp_map_gv)
6851
6852LEXT(hw_susp_map_gv)
6853
6854#define gsuStackSize ((31-26+1)*4)+4
6855
6856 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6857 ; Mint a new stack frame
6858 mflr r0 ; Get caller's return address
6859 mfsprg r11,2 ; Get feature flags
6860 mtcrf 0x02,r11 ; Insert feature flags into cr6
6861 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6862 ; Save caller's return address
6863 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6864 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6865 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6866 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6867 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6868 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6869
6870 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6871
6872 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6873 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6874 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6875
6876 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6877 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6878 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6879 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6880 srwi r11,r30,12 ; Form shadow hash:
6881 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6882 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6883 ; Form index offset from hash page number
6884 add r31,r31,r10 ; r31 <- hash page index entry
6885 lwz r31,4(r31) ; r31 <- hash page paddr
6886 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6887 ; r31 <- hash group paddr
6888 b gsuStart ; Get to it
6889gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6890 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6891 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6892 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6893 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6894 srwi r11,r30,12 ; Form shadow hash:
6895 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6896 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6897 ; Form index offset from hash page number
6898 add r31,r31,r10 ; r31 <- hash page index entry
6899 ld r31,0(r31) ; r31 <- hash page paddr
6900 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6901 ; r31 <- hash group paddr
6902
6903gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6904 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6905 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6906 mr r29,r11 ; Save caller's msr image
6907
6908 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6909 bl sxlkExclusive ; Get lock exclusive
6910
6911 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6912 mtctr r0 ; in this group
6913 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6914
6915 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6916 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6917 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6918 b gsu32SrchLp ; Let the search begin!
6919
6920 .align 5
6921gsu32SrchLp:
6922 mr r6,r3 ; r6 <- current mapping slot's flags
6923 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6924 mr r7,r4 ; r7 <- current mapping slot's space ID
6925 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6926 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6927 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6928 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6929 xor r7,r7,r9 ; Compare space ID
6930 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6931 xor r8,r8,r30 ; Compare virtual address
6932 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6933 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6934
6935 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6936 bdnz gsu32SrchLp ; Iterate
6937
6938 mr r6,r3 ; r6 <- current mapping slot's flags
6939 clrrwi r5,r5,12 ; Remove flags from virtual address
6940 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6941 xor r4,r4,r9 ; Compare space ID
6942 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6943 xor r5,r5,r30 ; Compare virtual address
6944 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6945 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6946 b gsuSrchMiss ; No joy in our hash group
6947
6948gsu64Search:
6949 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6950 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6951 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6952 b gsu64SrchLp ; Let the search begin!
6953
6954 .align 5
6955gsu64SrchLp:
6956 mr r6,r3 ; r6 <- current mapping slot's flags
6957 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6958 mr r7,r4 ; r7 <- current mapping slot's space ID
6959 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6960 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6961 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6962 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6963 xor r7,r7,r9 ; Compare space ID
6964 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6965 xor r8,r8,r30 ; Compare virtual address
6966 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6967 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6968
6969 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6970 bdnz gsu64SrchLp ; Iterate
6971
6972 mr r6,r3 ; r6 <- current mapping slot's flags
6973 clrrdi r5,r5,12 ; Remove flags from virtual address
6974 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6975 xor r4,r4,r9 ; Compare space ID
6976 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6977 xor r5,r5,r30 ; Compare virtual address
6978 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6979 bne gsuSrchMiss ; No joy in our hash group
6980
6981gsuSrchHit:
6982 bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6983 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6984 ; r31 <- mapping's physical address
6985 ; r3 -> PTE slot physical address
6986 ; r4 -> High-order 32 bits of PTE
6987 ; r5 -> Low-order 32 bits of PTE
6988 ; r6 -> PCA
6989 ; r7 -> PCA physical address
6990 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6991 b gsuFreePTE ; Join 64-bit path to release the PTE
6992gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6993 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6994gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
6995 beq- gsuNoPTE ; No valid PTE, we're almost done
6996 lis r0,0x8000 ; Prepare free bit for this slot
6997 srw r0,r0,r2 ; Position free bit
6998 or r6,r6,r0 ; Set it in our PCA image
6999 lwz r8,mpPte(r31) ; Get PTE pointer
7000 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7001 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7002 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7003 stw r6,0(r7) ; Update PCA and unlock the PTEG
7004
7005gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7006 ori r3,r3,mpgDormant ; Mark entry dormant
7007 stw r3,mpFlags(r31) ; Save updated flags
7008 eieio ; Ensure update is visible when we unlock
7009
7010gsuSrchMiss:
7011 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7012 bl sxlkUnlock ; Release host pmap search lock
7013
7014 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7015 mtmsr r29 ; Restore 'rupts, translation
7016 isync ; Throw a small wrench into the pipeline
7017 b gsuPopFrame ; Nothing to do now but pop a frame and return
7018gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7019gsuPopFrame:
7020 lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7021 ; Get caller's return address
7022 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7023 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7024 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7025 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7026 mtlr r0 ; Prepare return address
7027 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7028 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7029 lwz r1,0(r1) ; Pop stack frame
7030 blr ; Return to caller
7031
7032;
7033; Guest shadow assist -- test guest mapping reference and change bits
7034;
7035; Locates the specified guest mapping, and if it exists gathers its reference
7036