/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>

			.text

;
;            0        0        1        2        3        4        4        5      6
;            0        8        6        4        2        0        8        6      3
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx|  - EA
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |//////BB|BBBBBBBB|BBBB////|  - SID - base
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |////////|11111111|111111//|  - SID - copy 1
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |////////|//222222|22222222|  - SID - copy 2
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |//////33|33333333|33//////|  - SID - copy 3 - not needed
;           +--------+--------+--------+    for 65 bit VPN
;
;            0        0        1        2        3        4        4        5  5
;            0        8        6        4        2        0        8        1  5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////|  - SID Hash - this is all
;           +--------+--------+--------+--------+--------+--------+--------+    SID copies ORed
;
;            0        0        1        2        3        4        4        5  5
;            0        8        6        4        2        0        8        1  5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////|  - Shifted high order EA
;           +--------+--------+--------+--------+--------+--------+--------+    left shifted "segment"
;                                                                               part of EA to make
;                                                                               room for SID base
;
;
;            0        0        1        2        3        4        4        5  5
;            0        8        6        4        2        0        8        1  5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|  - VSID - SID Hash XORed
;           +--------+--------+--------+--------+--------+--------+--------+    with shifted EA
;
;            0        0        1        2        3        4        4        5        6        7      7
;            0        8        6        4        2        0        8        6        4        2      9
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx|  - VPN
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;
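;
;			A hedged C sketch (comment only, never assembled) of the construction the
;			pictures above describe; the exact shift amounts are assumptions read off
;			the diagrams, and vsid_sketch is an illustrative name, not a kernel symbol:
;
;			    #include <stdint.h>
;
;			    static uint64_t vsid_sketch(uint64_t ea, uint64_t sid) {
;			        uint64_t hash = (sid << 4) | (sid << 18) | (sid << 32);  /* base, copy 1, copy 2 ORed   */
;			        uint64_t seg  = (ea >> 28) << 4;                         /* shifted "segment" of the EA */
;			        return hash ^ seg;                                       /* VSID = SID hash XOR EA      */
;			    }
;
;			The VPN is then this VSID concatenated with the page-number bits of the EA.
;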


/*
 *  addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 *  Maps a page or block into a pmap
 *
 *  Returns 0 if the add worked, or the vaddr of the first overlap if not
 *
 *  Make mapping - not block or I/O - note: this is low level, the upper layer should remove
 *  duplicates (a C sketch of this sequence follows the comment):
 *
 *   1) bump mapping busy count
 *   2) lock pmap share
 *   3) find mapping full path - finds all possible list previous elements
 *   4) upgrade pmap to exclusive
 *   5) add mapping to search list
 *   6) find physent
 *   7) lock physent
 *   8) add to physent
 *   9) unlock physent
 *  10) unlock pmap
 *  11) drop mapping busy count
 *
 *
 *  Make mapping - block or I/O - note: this is low level, the upper layer should remove duplicates
 *
 *   1) bump mapping busy count
 *   2) lock pmap share
 *   3) find mapping full path - finds all possible list previous elements
 *   4) upgrade pmap to exclusive
 *   5) add mapping to search list
 *   6) unlock pmap
 *   7) drop mapping busy count
 *
 */
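
; A hedged C sketch (comment only) of the page-mapping sequence listed above;
; the sxlk_*/map_*/phys_* helpers named here are stand-ins for the assembly
; routines below, not a real kernel API:
;
;     addr64_t hw_add_map_sketch(pmap_t pmap, mapping_t *mp) {
;         addr64_t ov;
;         mapping_busy_bump(mp);                        /*  1) bump mapping busy count     */
;         sxlk_shared(&pmap->searchLock);               /*  2) lock pmap share             */
;         if ((ov = map_search_full(pmap, mp)) != 0) {  /*  3) find full path              */
;             sxlk_unlock(&pmap->searchLock);
;             mapping_busy_drop(mp);
;             return ov;                                /*     vaddr of the first overlap  */
;         }
;         sxlk_promote(&pmap->searchLock);              /*  4) upgrade pmap to exclusive   */
;         map_insert(pmap, mp);                         /*  5) add mapping to search list  */
;         physent_t *pp = phys_find_lock(mp);           /*  6/7) find and lock the physent */
;         phys_chain_add(pp, mp);                       /*  8) add to physent              */
;         phys_unlock(pp);                              /*  9) unlock physent              */
;         sxlk_unlock(&pmap->searchLock);               /* 10) unlock pmap                 */
;         mapping_busy_drop(mp);                        /* 11) drop mapping busy count     */
;         return 0;
;     }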

			.align	5
			.globl	EXT(hw_add_map)

LEXT(hw_add_map)

			stwu	r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0							; Save the link register
			stw		r17,FM_ARG0+0x00(r1)		; Save a register
			stw		r18,FM_ARG0+0x04(r1)		; Save a register
			stw		r19,FM_ARG0+0x08(r1)		; Save a register
			mfsprg	r19,2						; Get feature flags
			stw		r20,FM_ARG0+0x0C(r1)		; Save a register
			stw		r21,FM_ARG0+0x10(r1)		; Save a register
			mtcrf	0x02,r19					; Move pf64Bit to cr6
			stw		r22,FM_ARG0+0x14(r1)		; Save a register
			stw		r23,FM_ARG0+0x18(r1)		; Save a register
			stw		r24,FM_ARG0+0x1C(r1)		; Save a register
			stw		r25,FM_ARG0+0x20(r1)		; Save a register
			stw		r26,FM_ARG0+0x24(r1)		; Save a register
			stw		r27,FM_ARG0+0x28(r1)		; Save a register
			stw		r28,FM_ARG0+0x2C(r1)		; Save a register
			stw		r29,FM_ARG0+0x30(r1)		; Save a register
			stw		r30,FM_ARG0+0x34(r1)		; Save a register
			stw		r31,FM_ARG0+0x38(r1)		; Save a register
			stw		r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

#if DEBUG
			lwz		r11,pmapFlags(r3)			; Get pmap's flags
			rlwinm.	r11,r11,0,pmapVMgsaa		; Is guest shadow assist active?
			bne		hamPanic					; Call not valid for guest shadow assist pmap
#endif

			rlwinm	r11,r4,0,0,19				; Round down to get mapping block address
			mr		r28,r3						; Save the pmap
			mr		r31,r4						; Save the mapping
			bt++	pf64Bitb,hamSF1				; skip if 64-bit (only they take the hint)
			lwz		r20,pmapvr+4(r3)			; Get conversion mask for pmap
			lwz		r21,mbvrswap+4(r11)			; Get conversion mask for mapping

			b		hamSF1x						; Done...

hamSF1:		ld		r20,pmapvr(r3)				; Get conversion mask for pmap
			ld		r21,mbvrswap(r11)			; Get conversion mask for mapping

hamSF1x:	bl		EXT(mapSetUp)				; Turn off interrupts, translation, and possibly enter 64-bit

			mr		r17,r11						; Save the MSR
			xor		r28,r28,r20					; Convert the pmap to physical addressing
			xor		r31,r31,r21					; Convert the mapping to physical addressing

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkShared					; Go get a shared lock on the mapping lists
			mr.		r3,r3						; Did we get the lock?
			lwz		r24,mpFlags(r31)			; Pick up the flags
			bne--	hamBadLock					; Nope...

			li		r21,0						; Remember that we have the shared lock

;
;			Note that we do a full search (i.e., no shortcut level skips, etc.)
;			here so that we will know the previous elements so we can dequeue them
;			later.
;

hamRescan:	lwz		r4,mpVAddr(r31)				; Get the new vaddr top half
			lwz		r5,mpVAddr+4(r31)			; Get the new vaddr bottom half
			mr		r3,r28						; Pass in pmap to search
			lhz		r23,mpBSize(r31)			; Get the block size for later
			mr		r29,r4						; Save top half of vaddr for later
			mr		r30,r5						; Save bottom half of vaddr for later

			bl		EXT(mapSearchFull)			; Go see if we can find it

			li		r22,lo16(0x800C)			; Get 0xFFFF800C
			rlwinm	r0,r24,mpBSub+1,31,31		; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
			addi	r23,r23,1					; Get actual length
			rlwnm	r22,r22,r0,27,31			; Rotate to get 12 or 25
			lis		r0,0x8000					; Get 0xFFFFFFFF80000000
			slw		r9,r23,r22					; Isolate the low part
			rlwnm	r22,r23,r22,22,31			; Extract the high order
			addic	r23,r9,-4096				; Get the length to the last page
			add		r0,r0,r0					; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
			addme	r22,r22						; Do high order as well...
			mr.		r3,r3						; Did we find a mapping here?
			or		r0,r30,r0					; Fill high word of 64-bit with 1s so we will properly carry
			bne--	hamOverlay					; We found a mapping, this is no good, can not double map...

			addc	r9,r0,r23					; Add size to get last page in new range
			or.		r0,r4,r5					; Are we beyond the end?
			adde	r8,r29,r22					; Add the rest of the length on

			rlwinm	r9,r9,0,0,31				; Clean top half of sum
			beq++	hamFits						; We are at the end...

			cmplw	cr1,r9,r5					; Is the bottom part of our end less?
			cmplw	r8,r4						; Is our end before the next (top part)
			crand	cr0_eq,cr0_eq,cr1_lt		; Is the second half less and the first half equal?
			cror	cr0_eq,cr0_eq,cr0_lt		; Or is the top half less

			bf--	cr0_eq,hamOverlay			; We do not fit, there is an overlay...
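
; The overlap test above does 64-bit arithmetic with 32-bit registers: addc
; produces the low word plus a carry, adde folds the carry into the high word,
; and the cmplw/crand/cror chain compares (high,low) pairs.  A hedged C sketch
; of the same idea, with illustrative names:
;
;     /* end = start + len, carried across two 32-bit halves */
;     uint32_t lo = start_lo + len_lo;                      /* addc: low sum        */
;     uint32_t hi = start_hi + len_hi + (lo < start_lo);    /* adde: fold the carry */
;     /* "end before next" over the pair: high less, or high equal and low less */
;     int fits = (hi < next_hi) || (hi == next_hi && lo < next_lo);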

;
;			Here we try to convert to an exclusive lock.  This will fail if someone else
;			has it shared.
;
hamFits:	mr.		r21,r21						; Do we already have the exclusive lock?
			la		r3,pmapSXlk(r28)			; Point to the pmap search lock

			bne--	hamGotX						; We already have the exclusive...

			bl		sxlkPromote					; Try to promote shared to exclusive
			mr.		r3,r3						; Could we?
			beq++	hamGotX						; Yeah...

;
;			Since we could not promote our lock, we need to convert it.
;			That means that we drop the shared lock and wait to get it
;			exclusive.  Since we release the lock, we need to do the look up
;			again.
;

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkConvert					; Convert shared to exclusive
			mr.		r3,r3						; Could we?
			bne--	hamBadLock					; Nope, we must have timed out...

			li		r21,1						; Remember that we have the exclusive lock
			b		hamRescan					; Go look again...
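
; A hedged C sketch (comment only) of the promote-or-convert pattern used
; here; names are illustrative:
;
;     if (!have_exclusive) {
;         if (sxlk_promote(lk) != 0) {       /* in-place upgrade failed?           */
;             if (sxlk_convert(lk) != 0)     /* drop shared, wait for exclusive    */
;                 return mapRtBadLk;         /* convert timed out                  */
;             have_exclusive = 1;
;             goto rescan;                   /* lock was released, redo the lookup */
;         }
;     }
;     /* exclusive held and the lookup is still valid here */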

			.align	5

hamGotX:	mr		r3,r28						; Get the pmap to insert into
			mr		r4,r31						; Point to the mapping
			bl		EXT(mapInsert)				; Insert the mapping into the list

			rlwinm	r11,r24,mpPcfgb+2,mpPcfg>>6	; Get the index into the page config table
			lhz		r8,mpSpace(r31)				; Get the address space
			lwz		r11,lgpPcfg(r11)			; Get the page config
			mfsdr1	r7							; Get the hash table base/bounds
			lwz		r4,pmapResidentCnt(r28)		; Get the mapped page count

			andi.	r0,r24,mpType				; Is this a normal mapping?

			rlwimi	r8,r8,14,4,17				; Double address space
			rlwinm	r9,r30,0,4,31				; Clear segment
			rlwinm	r10,r30,18,14,17			; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
			rlwimi	r8,r8,28,0,3				; Get the last nybble of the hash
			rlwimi	r10,r29,18,0,13				; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
			rlwinm	r7,r7,0,16,31				; Isolate length mask (or count)
			addi	r4,r4,1						; Bump up the mapped page count
			srw		r9,r9,r11					; Isolate just the page index
			xor		r10,r10,r8					; Calculate the low 32 bits of the VSID
			stw		r4,pmapResidentCnt(r28)		; Set the mapped page count
			xor		r9,r9,r10					; Get the hash to the PTEG

			bne--	hamDoneNP					; Not a normal mapping, therefore, no physent...

			bl		mapPhysFindLock				; Go find and lock the physent

			bt++	pf64Bitb,ham64				; This is 64-bit...

			lwz		r11,ppLink+4(r3)			; Get the alias chain pointer
			rlwinm	r7,r7,16,0,15				; Get the PTEG wrap size
			slwi	r9,r9,6						; Make PTEG offset
			ori		r7,r7,0xFFC0				; Stick in the bottom part
			rlwinm	r12,r11,0,~ppFlags			; Clean it up
			and		r9,r9,r7					; Wrap offset into table
			mr		r4,r31						; Set the link to install
			stw		r9,mpPte(r31)				; Point the mapping at the PTEG (exact offset is invalid)
			stw		r12,mpAlias+4(r31)			; Move to the mapping
			bl		mapPhyCSet32				; Install the link
			b		hamDone						; Go finish up...

			.align	5

ham64:		li		r0,ppLFAmask				; Get mask to clean up alias pointer
			subfic	r7,r7,46					; Get number of leading zeros
			eqv		r4,r4,r4					; Get all ones
			ld		r11,ppLink(r3)				; Get the alias chain pointer
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
			srd		r4,r4,r7					; Get the wrap mask
			sldi	r9,r9,7						; Change hash to PTEG offset
			andc	r11,r11,r0					; Clean out the lock and flags
			and		r9,r9,r4					; Wrap to PTEG
			mr		r4,r31						; Set the link to install
			stw		r9,mpPte(r31)				; Point the mapping at the PTEG (exact offset is invalid)
			std		r11,mpAlias(r31)			; Set the alias pointer in the mapping

			bl		mapPhyCSet64				; Install the link

hamDone:	bl		mapPhysUnlock				; Unlock the physent chain

hamDoneNP:	la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkUnlock					; Unlock the search list

			mr		r3,r31						; Get the mapping pointer
			bl		mapDropBusy					; Drop the busy count

			li		r3,0						; Set successful return
			li		r4,0						; Set successful return

hamReturn:	bt++	pf64Bitb,hamR64				; Yes...

			mtmsr	r17							; Restore enables/translation/etc.
			isync
			b		hamReturnC					; Join common...

hamR64:		mtmsrd	r17							; Restore enables/translation/etc.
			isync

hamReturnC:	lwz		r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Get the return
			lwz		r17,FM_ARG0+0x00(r1)		; Restore a register
			lwz		r18,FM_ARG0+0x04(r1)		; Restore a register
			lwz		r19,FM_ARG0+0x08(r1)		; Restore a register
			lwz		r20,FM_ARG0+0x0C(r1)		; Restore a register
			mtlr	r0							; Restore the return
			lwz		r21,FM_ARG0+0x10(r1)		; Restore a register
			lwz		r22,FM_ARG0+0x14(r1)		; Restore a register
			lwz		r23,FM_ARG0+0x18(r1)		; Restore a register
			lwz		r24,FM_ARG0+0x1C(r1)		; Restore a register
			lwz		r25,FM_ARG0+0x20(r1)		; Restore a register
			lwz		r26,FM_ARG0+0x24(r1)		; Restore a register
			lwz		r27,FM_ARG0+0x28(r1)		; Restore a register
			lwz		r28,FM_ARG0+0x2C(r1)		; Restore a register
			lwz		r29,FM_ARG0+0x30(r1)		; Restore a register
			lwz		r30,FM_ARG0+0x34(r1)		; Restore a register
			lwz		r31,FM_ARG0+0x38(r1)		; Restore a register
			lwz		r1,0(r1)					; Pop the stack

			blr									; Leave...

			.align	5

hamOverlay:	lwz		r22,mpFlags(r3)				; Get the overlay flags
			li		r0,mpC|mpR					; Get a mask to turn off RC bits
			lwz		r23,mpFlags(r31)			; Get the requested flags
			lwz		r20,mpVAddr(r3)				; Get the overlay address
			lwz		r8,mpVAddr(r31)				; Get the requested address
			lwz		r21,mpVAddr+4(r3)			; Get the overlay address
			lwz		r9,mpVAddr+4(r31)			; Get the requested address
			lhz		r10,mpBSize(r3)				; Get the overlay length
			lhz		r11,mpBSize(r31)			; Get the requested length
			lwz		r24,mpPAddr(r3)				; Get the overlay physical address
			lwz		r25,mpPAddr(r31)			; Get the requested physical address
			andc	r21,r21,r0					; Clear RC bits
			andc	r9,r9,r0					; Clear RC bits

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkUnlock					; Unlock the search list

			rlwinm.	r0,r22,0,mpRIPb,mpRIPb		; Are we in the process of removing this one?
			mr		r3,r20						; Save the top of the colliding address
			rlwinm	r4,r21,0,0,19				; Save the bottom of the colliding address

			bne++	hamRemv						; Removing, go say so, so we can help...

			cmplw	r20,r8						; High part of vaddr the same?
			cmplw	cr1,r21,r9					; Low part?
			crand	cr5_eq,cr0_eq,cr1_eq		; Remember if same

			cmplw	r10,r11						; Size the same?
			cmplw	cr1,r24,r25					; Physical address?
			crand	cr5_eq,cr5_eq,cr0_eq		; Remember
			crand	cr5_eq,cr5_eq,cr1_eq		; Remember if same

			xor		r23,r23,r22					; Compare mapping flag words
			andi.	r23,r23,mpType|mpPerm		; Are mapping types and attributes the same?
			crand	cr5_eq,cr5_eq,cr0_eq		; Merge in final check
			bf--	cr5_eq,hamSmash				; This is not the same, so we return a smash...
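
; The sequence above accumulates one equality verdict across five separate
; comparisons by ANDing each result into cr5_eq, then branches once.  A
; hedged C equivalent (comment only, illustrative field names):
;
;     int same = (ov->vaddr == mp->vaddr) &&
;                (ov->bsize == mp->bsize) &&
;                (ov->paddr == mp->paddr) &&
;                (((ov->flags ^ mp->flags) & (mpType | mpPerm)) == 0);
;     return same ? mapRtMapDup : mapRtSmash;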

			ori		r4,r4,mapRtMapDup			; Set duplicate
			b		hamReturn					; And leave...

hamRemv:	ori		r4,r4,mapRtRemove			; We are in the process of removing the collision
			b		hamReturn					; Come back, y'all...

hamSmash:	ori		r4,r4,mapRtSmash			; Tell caller that it has some clean up to do
			b		hamReturn					; Join common epilog code

			.align	5

hamBadLock:	li		r3,0						; Set lock time out error code
			li		r4,mapRtBadLk				; Set lock time out error code
			b		hamReturn					; Leave....

hamPanic:	lis		r0,hi16(Choke)				; System abend
			ori		r0,r0,lo16(Choke)			; System abend
			li		r3,failMapping				; Show that we failed some kind of mapping thing
			sc


/*
 *  mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *  Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *  a 64-bit quantity, it is a long long, so it is in R4 and R5.
 *
 *  We return the virtual address of the removed mapping in R3.
 *
 *  Note that this is designed to be called from 32-bit mode with a stack.
 *
 *  We disable translation and all interruptions here.  This keeps us
 *  from having to worry about a deadlock due to having anything locked
 *  and needing it to process a fault.
 *
 *  Note that this must be done with both interruptions off and VM off
 *
 *  Remove mapping via pmap, regular page, no pte
 *
 *   1) lock pmap share
 *   2) find mapping full path - finds all possible list previous elements
 *   3) upgrade pmap to exclusive
 *   4) bump mapping busy count
 *   5) remove mapping from search list
 *   6) unlock pmap
 *   7) lock physent
 *   8) remove from physent
 *   9) unlock physent
 *  10) drop mapping busy count
 *  11) drain mapping busy count
 *
 *
 *  Remove mapping via pmap, regular page, with pte
 *  (a C sketch of this path follows the comment)
 *
 *   1) lock pmap share
 *   2) find mapping full path - finds all possible list previous elements
 *   3) upgrade lock to exclusive
 *   4) bump mapping busy count
 *   5) lock PTEG
 *   6) invalidate pte and tlbie
 *   7) atomic merge rc into physent
 *   8) unlock PTEG
 *   9) remove mapping from search list
 *  10) unlock pmap
 *  11) lock physent
 *  12) remove from physent
 *  13) unlock physent
 *  14) drop mapping busy count
 *  15) drain mapping busy count
 *
 *
 *  Remove mapping via pmap, I/O or block
 *
 *   1) lock pmap share
 *   2) find mapping full path - finds all possible list previous elements
 *   3) upgrade lock to exclusive
 *   4) bump mapping busy count
 *   5) mark remove-in-progress
 *   6) check and bump remove chunk cursor if needed
 *   7) unlock pmap
 *   8) if something to invalidate, go to step 11
 *   9) drop busy
 *  10) return with mapRtRemove to force higher level to call again
 *  11) lock PTEG
 *  12) invalidate ptes, no tlbie
 *  13) unlock PTEG
 *  14) repeat 11 - 13 for all pages in chunk
 *  15) if not final chunk, go to step 9
 *  16) invalidate tlb entries for the whole block map but no more than the full tlb
 *  17) lock pmap share
 *  18) find mapping full path - finds all possible list previous elements
 *  19) upgrade lock to exclusive
 *  20) remove mapping from search list
 *  21) drop mapping busy count
 *  22) drain mapping busy count
 *
 */
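
; A hedged C sketch (comment only) of the "regular page, with pte" removal
; path above; the helper names are stand-ins for the assembly below, not a
; real kernel API:
;
;     mapping_t *hw_rem_map_sketch(pmap_t pmap, addr64_t va) {
;         sxlk_shared(&pmap->searchLock);              /*  1) lock pmap share            */
;         mapping_t *mp = map_search_full(pmap, va);   /*  2) find mapping full path     */
;         if (mp == NULL) { sxlk_unlock(&pmap->searchLock); return NULL; }
;         sxlk_promote(&pmap->searchLock);             /*  3) upgrade lock to exclusive  */
;         mapping_busy_bump(mp);                       /*  4) bump mapping busy count    */
;         pteg_lock(mp);                               /*  5) lock PTEG                  */
;         pte_invalidate_tlbie(mp);                    /*  6) invalidate pte and tlbie   */
;         physent_merge_rc(mp);                        /*  7) atomic merge rc            */
;         pteg_unlock(mp);                             /*  8) unlock PTEG                */
;         map_remove(pmap, mp);                        /*  9) remove from search list    */
;         sxlk_unlock(&pmap->searchLock);              /* 10) unlock pmap                */
;         phys_chain_remove(mp);                       /* 11-13) physent lock/remove/unlock */
;         mapping_busy_drop(mp);                       /* 14) drop busy count            */
;         mapping_busy_drain(mp);                      /* 15) drain busy count           */
;         return mp;
;     }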

			.align	5
			.globl	EXT(hw_rem_map)

LEXT(hw_rem_map)

;
;			NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;			THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4
			stwu	r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0							; Save the link register
			stw		r15,FM_ARG0+0x00(r1)		; Save a register
			stw		r16,FM_ARG0+0x04(r1)		; Save a register
			stw		r17,FM_ARG0+0x08(r1)		; Save a register
			stw		r18,FM_ARG0+0x0C(r1)		; Save a register
			stw		r19,FM_ARG0+0x10(r1)		; Save a register
			mfsprg	r19,2						; Get feature flags
			stw		r20,FM_ARG0+0x14(r1)		; Save a register
			stw		r21,FM_ARG0+0x18(r1)		; Save a register
			mtcrf	0x02,r19					; Move pf64Bit to cr6
			stw		r22,FM_ARG0+0x1C(r1)		; Save a register
			stw		r23,FM_ARG0+0x20(r1)		; Save a register
			stw		r24,FM_ARG0+0x24(r1)		; Save a register
			stw		r25,FM_ARG0+0x28(r1)		; Save a register
			stw		r26,FM_ARG0+0x2C(r1)		; Save a register
			stw		r27,FM_ARG0+0x30(r1)		; Save a register
			stw		r28,FM_ARG0+0x34(r1)		; Save a register
			stw		r29,FM_ARG0+0x38(r1)		; Save a register
			stw		r30,FM_ARG0+0x3C(r1)		; Save a register
			stw		r31,FM_ARG0+0x40(r1)		; Save a register
			stw		r6,FM_ARG0+0x44(r1)			; Save address to save next mapped vaddr
			stw		r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

#if DEBUG
			lwz		r11,pmapFlags(r3)			; Get pmap's flags
			rlwinm.	r11,r11,0,pmapVMgsaa		; Is guest shadow assist active?
			bne		hrmPanic					; Call not valid for guest shadow assist pmap
#endif

			bt++	pf64Bitb,hrmSF1				; skip if 64-bit (only they take the hint)
			lwz		r9,pmapvr+4(r3)				; Get conversion mask
			b		hrmSF1x						; Done...

hrmSF1:		ld		r9,pmapvr(r3)				; Get conversion mask

hrmSF1x:
			bl		EXT(mapSetUp)				; Turn off interrupts, translation, and possibly enter 64-bit

			xor		r28,r3,r9					; Convert the pmap to physical addressing

;
;			Here is where we join in from the hw_purge_* routines
;

hrmJoin:	lwz		r3,pmapFlags(r28)			; Get pmap's flags
			mfsprg	r19,2						; Get feature flags again (for alternate entries)

			mr		r17,r11						; Save the MSR
			mr		r29,r4						; Top half of vaddr
			mr		r30,r5						; Bottom half of vaddr

			rlwinm.	r3,r3,0,pmapVMgsaa			; Is guest shadow assist active?
			bne--	hrmGuest					; Yes, handle specially

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkShared					; Go get a shared lock on the mapping lists
			mr.		r3,r3						; Did we get the lock?
			bne--	hrmBadLock					; Nope...

;
;			Note that we do a full search (i.e., no shortcut level skips, etc.)
;			here so that we will know the previous elements so we can dequeue them
;			later.  Note: we get back mpFlags in R7.
;

			mr		r3,r28						; Pass in pmap to search
			mr		r4,r29						; High order of address
			mr		r5,r30						; Low order of address
			bl		EXT(mapSearchFull)			; Go see if we can find it

			andi.	r0,r7,mpPerm				; Mapping marked permanent?
			crmove	cr5_eq,cr0_eq				; Remember permanent marking
			mr		r20,r7						; Remember mpFlags
			mr.		r31,r3						; Did we? (And remember mapping address for later)
			mr		r15,r4						; Save top of next vaddr
			mr		r16,r5						; Save bottom of next vaddr
			beq--	hrmNotFound					; Nope, not found...

			bf--	cr5_eq,hrmPerm				; This one can't be removed...
;
;			Here we try to promote to an exclusive lock.  This will fail if someone else
;			has it shared.
;

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkPromote					; Try to promote shared to exclusive
			mr.		r3,r3						; Could we?
			beq++	hrmGotX						; Yeah...

;
;			Since we could not promote our lock, we need to convert it.
;			That means that we drop the shared lock and wait to get it
;			exclusive.  Since we release the lock, we need to do the look up
;			again.
;

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkConvert					; Convert shared to exclusive
			mr.		r3,r3						; Could we?
			bne--	hrmBadLock					; Nope, we must have timed out...

			mr		r3,r28						; Pass in pmap to search
			mr		r4,r29						; High order of address
			mr		r5,r30						; Low order of address
			bl		EXT(mapSearchFull)			; Rescan the list

			andi.	r0,r7,mpPerm				; Mapping marked permanent?
			crmove	cr5_eq,cr0_eq				; Remember permanent marking
			mr.		r31,r3						; Did we lose it when we converted?
			mr		r20,r7						; Remember mpFlags
			mr		r15,r4						; Save top of next vaddr
			mr		r16,r5						; Save bottom of next vaddr
			beq--	hrmNotFound					; Yeah, we did, someone tossed it for us...

			bf--	cr5_eq,hrmPerm				; This one can't be removed...

;
;			We have an exclusive lock on the mapping chain.  And we
;			also have the busy count bumped in the mapping so it can
;			not vanish on us.
;

hrmGotX:	mr		r3,r31						; Get the mapping
			bl		mapBumpBusy					; Bump up the busy count

;
;			Invalidate any PTEs associated with this
;			mapping (more than one if a block) and accumulate the reference
;			and change bits.
;
;			Here is also where we need to split 32- and 64-bit processing
;

			lwz		r21,mpPte(r31)				; Grab the offset to the PTE
			rlwinm	r23,r29,0,1,0				; Copy high order vaddr to high if 64-bit machine
			mfsdr1	r29							; Get the hash table base and size

			rlwinm	r0,r20,0,mpType				; Isolate mapping type
			cmplwi	cr5,r0,mpBlock				; Remember whether this is a block mapping
			cmplwi	r0,mpMinSpecial				; cr0_lt <- not a special mapping type

			rlwinm	r0,r21,0,mpHValidb,mpHValidb	; See if we actually have a PTE
			ori		r2,r2,0xFFFF				; Get mask to clean out hash table base (works for both 32- and 64-bit)
			cmpwi	cr1,r0,0					; Have we made a PTE for this yet?
			rlwinm	r21,r21,0,~mpHValid			; Clear out valid bit
			crorc	cr0_eq,cr1_eq,cr0_lt		; No need to look at PTE if none or a special mapping
			rlwimi	r23,r30,0,0,31				; Insert low under high part of address
			andc	r29,r29,r2					; Clean up hash table base
			li		r22,0						; Clear this on out (also sets RC to 0 if we bail)
			mr		r30,r23						; Move the now merged vaddr to the correct register
			add		r26,r29,r21					; Point to the PTEG slot

			bt++	pf64Bitb,hrmSplit64			; Go do 64-bit version...

			rlwinm	r9,r21,28,4,29				; Convert PTEG to PCA entry
			beq-	cr5,hrmBlock32				; Go treat block specially...
			subfic	r9,r9,-4					; Get the PCA entry offset
			bt-		cr0_eq,hrmPysDQ32			; Skip next if no possible PTE...
			add		r7,r9,r29					; Point to the PCA slot

			bl		mapLockPteg					; Go lock up the PTEG (Note: we need to save R6 to set PCA)

			lwz		r21,mpPte(r31)				; Get the quick pointer again
			lwz		r5,0(r26)					; Get the top of PTE

			rlwinm.	r0,r21,0,mpHValidb,mpHValidb	; See if we actually have a PTE
			rlwinm	r21,r21,0,~mpHValid			; Clear out valid bit
			rlwinm	r5,r5,0,1,31				; Turn off valid bit in PTE
			stw		r21,mpPte(r31)				; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
			beq-	hrmUlckPCA32				; PTE is gone, no need to invalidate...

			stw		r5,0(r26)					; Invalidate the PTE

			li		r9,tlbieLock				; Get the TLBIE lock

			sync								; Make sure the invalid PTE is actually in memory

hrmPtlb32:	lwarx	r5,0,r9						; Get the TLBIE lock
			mr.		r5,r5						; Is it locked?
			li		r5,1						; Get locked indicator
			bne-	hrmPtlb32					; It is locked, go spin...
			stwcx.	r5,0,r9						; Try to get it
			bne-	hrmPtlb32					; We was beat...
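
; The lwarx/stwcx. pair above is the classic PowerPC load-reserve /
; store-conditional spinlock acquire.  A hedged C11 sketch of the same
; protocol (comment only, illustrative):
;
;     #include <stdatomic.h>
;
;     static void tlbie_lock_sketch(atomic_uint *lock) {
;         unsigned expected;
;         do {
;             while (atomic_load(lock) != 0)    /* spin while held (the mr./bne- test) */
;                 ;
;             expected = 0;                     /* try to swap 0 -> 1 (the stwcx.)     */
;         } while (!atomic_compare_exchange_weak(lock, &expected, 1));
;     }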

			rlwinm.	r0,r19,0,pfSMPcapb,pfSMPcapb	; Can this processor do SMP?

			tlbie	r30							; Invalidate all corresponding TLB entries

			beq-	hrmNTlbs					; Jump if we can not do a TLBSYNC....

			eieio								; Make sure that the tlbie happens first
			tlbsync								; Wait for everyone to catch up
			sync								; Make sure of it all

hrmNTlbs:	li		r0,0						; Clear this
			rlwinm	r2,r21,29,29,31				; Get slot number (8 byte entries)
			stw		r0,tlbieLock(0)				; Clear the tlbie lock
			lis		r0,0x8000					; Get bit for slot 0
			eieio								; Make sure those RC bits have been stashed in PTE

			srw		r0,r0,r2					; Get the allocation hash mask
			lwz		r22,4(r26)					; Get the latest reference and change bits
			or		r6,r6,r0					; Show that this slot is free

hrmUlckPCA32:
			eieio								; Make sure all updates come first
			stw		r6,0(r7)					; Unlock the PTEG

;
;			Now, it is time to remove the mapping and unlock the chain.
;			But first, we need to make sure no one else is using this
;			mapping, so we drain the busy now
;

hrmPysDQ32:	mr		r3,r31						; Point to the mapping
			bl		mapDrainBusy				; Go wait until mapping is unused

			mr		r3,r28						; Get the pmap to remove from
			mr		r4,r31						; Point to the mapping
			bl		EXT(mapRemove)				; Remove the mapping from the list

			lwz		r4,pmapResidentCnt(r28)		; Get the mapped page count
			rlwinm	r0,r20,0,mpType				; Isolate mapping type
			cmplwi	cr1,r0,mpMinSpecial			; cr1_lt <- not a special mapping type
			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			subi	r4,r4,1						; Drop down the mapped page count
			stw		r4,pmapResidentCnt(r28)		; Set the mapped page count
			bl		sxlkUnlock					; Unlock the search list

			bf--	cr1_lt,hrmRetn32			; This one has no real memory associated with it so we are done...

			bl		mapPhysFindLock				; Go find and lock the physent

			lwz		r9,ppLink+4(r3)				; Get first mapping

			mr		r4,r22						; Get the RC bits we just got
			bl		mapPhysMerge				; Go merge the RC bits

			rlwinm	r9,r9,0,~ppFlags			; Clear the flags from the mapping pointer

			cmplw	r9,r31						; Are we the first on the list?
			bne-	hrmNot1st					; Nope...

			li		r9,0						; Get a 0
			lwz		r4,mpAlias+4(r31)			; Get our new forward pointer
			stw		r9,mpAlias+4(r31)			; Make sure we are off the chain
			bl		mapPhyCSet32				; Go set the physent link and preserve flags

			b		hrmPhyDQd					; Join up and unlock it all...

			.align	5

hrmPerm:	li		r8,-4096					; Get the value we need to round down to a page
			and		r8,r8,r31					; Get back to a page
			lwz		r8,mbvrswap+4(r8)			; Get last half of virtual to real swap

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkUnlock					; Unlock the search list

			xor		r3,r31,r8					; Flip mapping address to virtual
			ori		r3,r3,mapRtPerm				; Set permanent mapping error
			b		hrmErRtn

hrmBadLock:	li		r3,mapRtBadLk				; Set bad lock
			b		hrmErRtn

hrmEndInSight:
			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkUnlock					; Unlock the search list

hrmDoneChunk:
			mr		r3,r31						; Point to the mapping
			bl		mapDropBusy					; Drop the busy here since we need to come back
			li		r3,mapRtRemove				; Say we are still removing this
			b		hrmErRtn

			.align	5

hrmNotFound:
			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkUnlock					; Unlock the search list
			li		r3,mapRtNotFnd				; No mapping found

hrmErRtn:	bt++	pf64Bitb,hrmSF1z			; skip if 64-bit (only they take the hint)

			mtmsr	r17							; Restore enables/translation/etc.
			isync
			b		hrmRetnCmn					; Join the common return code...

hrmSF1z:	mtmsrd	r17							; Restore enables/translation/etc.
			isync
			b		hrmRetnCmn					; Join the common return code...

			.align	5

hrmNot1st:	mr.		r8,r9						; Remember and test current node
			beq-	hrmPhyDQd					; Could not find our node, someone must have unmapped us...
			lwz		r9,mpAlias+4(r9)			; Chain to the next
			cmplw	r9,r31						; Is this us?
			bne-	hrmNot1st					; Not us...

			lwz		r9,mpAlias+4(r9)			; Get our forward pointer
			stw		r9,mpAlias+4(r8)			; Unchain us
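
; The hrmNot1st loop above walks the physent's alias chain to find our
; predecessor, then splices us out.  A hedged C sketch (comment only,
; illustrative types):
;
;     mapping_t *prev = first;              /* first mapping on the physent chain */
;     while (prev != NULL && prev->alias != us)
;         prev = prev->alias;               /* chain to the next                  */
;     if (prev != NULL)
;         prev->alias = us->alias;          /* the "stw r9,mpAlias+4(r8)" unchain */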

			nop									; For alignment

hrmPhyDQd:	bl		mapPhysUnlock				; Unlock the physent chain

hrmRetn32:	rlwinm	r8,r31,0,0,19				; Find start of page
			mr		r3,r31						; Copy the pointer to the mapping
			lwz		r8,mbvrswap+4(r8)			; Get last half of virtual to real swap
			bl		mapDrainBusy				; Go wait until mapping is unused

			xor		r3,r31,r8					; Flip mapping address to virtual

			mtmsr	r17							; Restore enables/translation/etc.
			isync

hrmRetnCmn:	lwz		r6,FM_ARG0+0x44(r1)			; Get address to save next mapped vaddr
			lwz		r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return
			lwz		r17,FM_ARG0+0x08(r1)		; Restore a register
			lwz		r18,FM_ARG0+0x0C(r1)		; Restore a register
			mr.		r6,r6						; Should we pass back the "next" vaddr?
			lwz		r19,FM_ARG0+0x10(r1)		; Restore a register
			lwz		r20,FM_ARG0+0x14(r1)		; Restore a register
			mtlr	r0							; Restore the return

			rlwinm	r16,r16,0,0,19				; Clean to a page boundary
			beq		hrmNoNextAdr				; Do not pass back the next vaddr...
			stw		r15,0(r6)					; Pass back the top of the next vaddr
			stw		r16,4(r6)					; Pass back the bottom of the next vaddr

hrmNoNextAdr:
			lwz		r15,FM_ARG0+0x00(r1)		; Restore a register
			lwz		r16,FM_ARG0+0x04(r1)		; Restore a register
			lwz		r21,FM_ARG0+0x18(r1)		; Restore a register
			rlwinm	r3,r3,0,0,31				; Clear top of register if 64-bit
			lwz		r22,FM_ARG0+0x1C(r1)		; Restore a register
			lwz		r23,FM_ARG0+0x20(r1)		; Restore a register
			lwz		r24,FM_ARG0+0x24(r1)		; Restore a register
			lwz		r25,FM_ARG0+0x28(r1)		; Restore a register
			lwz		r26,FM_ARG0+0x2C(r1)		; Restore a register
			lwz		r27,FM_ARG0+0x30(r1)		; Restore a register
			lwz		r28,FM_ARG0+0x34(r1)		; Restore a register
			lwz		r29,FM_ARG0+0x38(r1)		; Restore a register
			lwz		r30,FM_ARG0+0x3C(r1)		; Restore a register
			lwz		r31,FM_ARG0+0x40(r1)		; Restore a register
			lwz		r1,0(r1)					; Pop the stack
			blr									; Leave...

;
;			Here is where we come when all is lost.  Somehow, we failed a mapping function
;			that must work... All hope is gone.  Alas, we die.......
;

hrmPanic:	lis		r0,hi16(Choke)				; System abend
			ori		r0,r0,lo16(Choke)			; System abend
			li		r3,failMapping				; Show that we failed some kind of mapping thing
			sc


;
;			Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;			in the range.  Then, if we did not finish, return a code indicating that we need to
;			be called again.  Eventually, we will finish and then, we will do a TLBIE for each
;			PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
;
;			A potential speed up is that we stop the invalidate loop once we have walked through
;			the hash table once.  This really is not worth the trouble because we need to have
;			mapped 1/2 of physical RAM in an individual block.  Way unlikely.
;
;			We should rethink this and see if we think it will be faster to check PTE and
;			only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;

			.align	5

hrmBlock32:	lis		r29,0xD000					; Get shift to 32MB bsu
			rlwinm	r24,r20,mpBSub+1+2,29,29	; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
			lhz		r25,mpBSize(r31)			; Get the number of pages in block
			lhz		r23,mpSpace(r31)			; Get the address space hash
			lwz		r9,mpBlkRemCur(r31)			; Get our current remove position
			rlwnm	r29,r29,r24,28,31			; Rotate to get 0 or 13
			addi	r25,r25,1					; Account for zero-based counting
			ori		r0,r20,mpRIP				; Turn on the remove in progress flag
			slw		r25,r25,r29					; Adjust for 32MB if needed
			mfsdr1	r29							; Get the hash table base and size
			rlwinm	r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb	; Get high order of hash
			subi	r25,r25,1					; Convert back to zero-based counting
			lwz		r27,mpVAddr+4(r31)			; Get the base vaddr
			sub		r4,r25,r9					; Get number of pages left
			cmplw	cr1,r9,r25					; Have we already hit the end?
			addi	r10,r9,mapRemChunk			; Point to the start of the next chunk
			addi	r2,r4,-mapRemChunk			; See if mapRemChunk or more
			rlwinm	r26,r29,16,7,15				; Get the hash table size
			srawi	r2,r2,31					; We have -1 if less than mapRemChunk or 0 if equal or more
			stb		r0,mpFlags+3(r31)			; Save the flags with the mpRIP bit on
			subi	r4,r4,mapRemChunk-1			; Back off for a running start (will be negative for more than mapRemChunk)
			cmpwi	cr7,r2,0					; Remember if we have finished
			slwi	r0,r9,12					; Make cursor into page offset
			or		r24,r24,r23					; Get full hash
			and		r4,r4,r2					; If more than a chunk, bring this back to 0
			rlwinm	r29,r29,0,0,15				; Isolate the hash table base
			add		r27,r27,r0					; Adjust vaddr to start of current chunk
			addi	r4,r4,mapRemChunk-1			; Add mapRemChunk-1 back on to get min(pages left, chunk size)
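
; The addi/srawi/and/addi quartet above computes a branch-free minimum,
; r4 = min(pages_left, mapRemChunk - 1).  A hedged C sketch of the trick
; (comment only):
;
;     int32_t mask = (int32_t)(n - CHUNK) >> 31;        /* all ones if n < CHUNK, else 0 */
;     int32_t r    = ((n - (CHUNK - 1)) & mask) + (CHUNK - 1);
;     /* n >= CHUNK: mask = 0,  so r = CHUNK - 1 (a full chunk, zero-based) */
;     /* n <  CHUNK: mask = ~0, so r = n         (just what is left)        */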

			bgt-	cr1,hrmEndInSight			; Someone is already doing the last hunk...

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			stw		r10,mpBlkRemCur(r31)		; Set next chunk to do (note: this may indicate after end)
			bl		sxlkUnlock					; Unlock the search list while we are invalidating

			rlwinm	r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb	; Isolate the segment
			rlwinm	r30,r27,26,6,25				; Shift vaddr to PTEG offset (and remember VADDR in R27)
			xor		r24,r24,r8					; Get the proper VSID
			rlwinm	r21,r27,26,10,25			; Shift page index to PTEG offset (and remember VADDR in R27)
			ori		r26,r26,lo16(0xFFC0)		; Stick in the rest of the length
			rlwinm	r22,r4,6,10,25				; Shift size to PTEG offset
			rlwinm	r24,r24,6,0,25				; Shift hash to PTEG units
			add		r22,r22,r30					; Get end address (in PTEG units)

hrmBInv32:	rlwinm	r23,r30,0,10,25				; Isolate just the page index
			xor		r23,r23,r24					; Hash it
			and		r23,r23,r26					; Wrap it into the table
			rlwinm	r3,r23,28,4,29				; Change to PCA offset
			subfic	r3,r3,-4					; Get the PCA entry offset
			add		r7,r3,r29					; Point to the PCA slot
			cmplw	cr5,r30,r22					; Check if we reached the end of the range
			addi	r30,r30,64					; Bump to the next vaddr

			bl		mapLockPteg					; Lock the PTEG

			rlwinm.	r4,r6,16,0,7				; Position, save, and test block mappings in PCA
			add		r5,r23,r29					; Point to the PTEG
			li		r0,0						; Set an invalid PTE value
			beq+	hrmBNone32					; No block map PTEs in this PTEG...
			mtcrf	0x80,r4						; Set CRs to select PTE slots
			mtcrf	0x40,r4						; Set CRs to select PTE slots

			bf		0,hrmSlot0					; No autogen here
			stw		r0,0x00(r5)					; Invalidate PTE

hrmSlot0:	bf		1,hrmSlot1					; No autogen here
			stw		r0,0x08(r5)					; Invalidate PTE

hrmSlot1:	bf		2,hrmSlot2					; No autogen here
			stw		r0,0x10(r5)					; Invalidate PTE

hrmSlot2:	bf		3,hrmSlot3					; No autogen here
			stw		r0,0x18(r5)					; Invalidate PTE

hrmSlot3:	bf		4,hrmSlot4					; No autogen here
			stw		r0,0x20(r5)					; Invalidate PTE

hrmSlot4:	bf		5,hrmSlot5					; No autogen here
			stw		r0,0x28(r5)					; Invalidate PTE

hrmSlot5:	bf		6,hrmSlot6					; No autogen here
			stw		r0,0x30(r5)					; Invalidate PTE

hrmSlot6:	bf		7,hrmSlot7					; No autogen here
			stw		r0,0x38(r5)					; Invalidate PTE

hrmSlot7:	rlwinm	r0,r4,16,16,23				; Move in use to autogen
			or		r6,r6,r4					; Flip on the free bits that correspond to the autogens we cleared
			andc	r6,r6,r0					; Turn off all the old autogen bits
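
; The bf/stw ladder above is a fully unrolled eight-slot loop: mtcrf copied
; the PCA's autogen byte into CR bits 0..7, and each bf skips one 8-byte PTE
; slot if its bit is clear.  A hedged C equivalent (comment only):
;
;     for (int slot = 0; slot < 8; slot++)        /* one CR bit per PTEG slot   */
;         if (autogen_bits & (0x80 >> slot))
;             pteg[slot * 2] = 0;                 /* zap word 0 of the 8-byte PTE */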

hrmBNone32:	eieio								; Make sure all updates come first

			stw		r6,0(r7)					; Unlock and set the PCA

			bne+	cr5,hrmBInv32				; Go invalidate the next...

			bge+	cr7,hrmDoneChunk			; We have not as yet done the last chunk, go tell our caller to call again...

			mr		r3,r31						; Copy the pointer to the mapping
			bl		mapDrainBusy				; Go wait until we are sure all other removers are done with this one

			sync								; Make sure memory is consistent

			subi	r5,r25,63					; Subtract TLB size from page count (note we are 0 based here)
			li		r6,63						; Assume full invalidate for now
			srawi	r5,r5,31					; Make 0 if we need a full purge, -1 otherwise
			andc	r6,r6,r5					; Clear max if we have less to do
			and		r5,r25,r5					; Clear count if we have more than max
			lwz		r27,mpVAddr+4(r31)			; Get the base vaddr again
			li		r7,tlbieLock				; Get the TLBIE lock
			or		r5,r5,r6					; Get number of TLBIEs needed

hrmBTLBlck:	lwarx	r2,0,r7						; Get the TLBIE lock
			mr.		r2,r2						; Is it locked?
			li		r2,1						; Get our lock value
			bne-	hrmBTLBlck					; It is locked, go wait...
			stwcx.	r2,0,r7						; Try to get it
			bne-	hrmBTLBlck					; We was beat...

hrmBTLBi:	addic.	r5,r5,-1					; See if we did them all
			tlbie	r27							; Invalidate it everywhere
			addi	r27,r27,0x1000				; Up to the next page
			bge+	hrmBTLBi					; Make sure we have done it all...

			rlwinm.	r0,r19,0,pfSMPcapb,pfSMPcapb	; Can this processor do SMP?
			li		r2,0						; Lock clear value

			sync								; Make sure all is quiet
			beq-	hrmBNTlbs					; Jump if we can not do a TLBSYNC....

			eieio								; Make sure that the tlbie happens first
			tlbsync								; Wait for everyone to catch up
			sync								; Wait for quiet again

hrmBNTlbs:	stw		r2,tlbieLock(0)				; Clear the tlbie lock

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkShared					; Go get a shared lock on the mapping lists
			mr.		r3,r3						; Did we get the lock?
			bne-	hrmPanic					; Nope...

			lwz		r4,mpVAddr(r31)				; High order of address
			lwz		r5,mpVAddr+4(r31)			; Low order of address
			mr		r3,r28						; Pass in pmap to search
			mr		r29,r4						; Save this in case we need it (only promote fails)
			mr		r30,r5						; Save this in case we need it (only promote fails)
			bl		EXT(mapSearchFull)			; Go see if we can find it

			mr.		r3,r3						; Did we? (And remember mapping address for later)
			mr		r15,r4						; Save top of next vaddr
			mr		r16,r5						; Save bottom of next vaddr
			beq-	hrmPanic					; Nope, not found...

			cmplw	r3,r31						; Same mapping?
			bne-	hrmPanic					; Not good...

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkPromote					; Try to promote shared to exclusive
			mr.		r3,r3						; Could we?
			mr		r3,r31						; Restore the mapping pointer
			beq+	hrmBDone1					; Yeah...

			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			bl		sxlkConvert					; Convert shared to exclusive
			mr.		r3,r3						; Could we?
			bne--	hrmPanic					; Nope, we must have timed out...

			mr		r3,r28						; Pass in pmap to search
			mr		r4,r29						; High order of address
			mr		r5,r30						; Low order of address
			bl		EXT(mapSearchFull)			; Rescan the list

			mr.		r3,r3						; Did we lose it when we converted?
			mr		r15,r4						; Save top of next vaddr
			mr		r16,r5						; Save bottom of next vaddr
			beq--	hrmPanic					; Yeah, we did, someone tossed it for us...

hrmBDone1:	bl		mapDrainBusy				; Go wait until mapping is unused

			mr		r3,r28						; Get the pmap to remove from
			mr		r4,r31						; Point to the mapping
			bl		EXT(mapRemove)				; Remove the mapping from the list

			lwz		r4,pmapResidentCnt(r28)		; Get the mapped page count
			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			subi	r4,r4,1						; Drop down the mapped page count
			stw		r4,pmapResidentCnt(r28)		; Set the mapped page count
			bl		sxlkUnlock					; Unlock the search list

			b		hrmRetn32					; We are all done, get out...

;
;			Here we handle the 64-bit version of hw_rem_map
;

			.align	5

hrmSplit64:	rlwinm	r9,r21,27,5,29				; Convert PTEG to PCA entry
			beq--	cr5,hrmBlock64				; Go treat block specially...
			subfic	r9,r9,-4					; Get the PCA entry offset
			bt--	cr0_eq,hrmPysDQ64			; Skip next if no possible PTE...
			add		r7,r9,r29					; Point to the PCA slot

			bl		mapLockPteg					; Go lock up the PTEG

			lwz		r21,mpPte(r31)				; Get the quick pointer again
			ld		r5,0(r26)					; Get the top of PTE

			rlwinm.	r0,r21,0,mpHValidb,mpHValidb	; See if we actually have a PTE
			rlwinm	r21,r21,0,~mpHValid			; Clear out valid bit
			sldi	r23,r5,16					; Shift AVPN up to EA format
//			**** Need to adjust above shift based on the page size - large pages need to shift a bit more
			rldicr	r5,r5,0,62					; Clear the valid bit
			rldimi	r23,r30,0,36				; Insert the page portion of the VPN
			stw		r21,mpPte(r31)				; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
			beq--	hrmUlckPCA64				; PTE is gone, no need to invalidate...

			std		r5,0(r26)					; Invalidate the PTE

			li		r9,tlbieLock				; Get the TLBIE lock

			sync								; Make sure the invalid PTE is actually in memory

hrmPtlb64:	lwarx	r5,0,r9						; Get the TLBIE lock
			rldicl	r23,r23,0,16				; Clear bits 0:15 cause they say to
			mr.		r5,r5						; Is it locked?
			li		r5,1						; Get locked indicator
			bne--	hrmPtlb64w					; It is locked, go spin...
			stwcx.	r5,0,r9						; Try to get it
			bne--	hrmPtlb64					; We was beat...

			tlbie	r23							; Invalidate all corresponding TLB entries

			eieio								; Make sure that the tlbie happens first
			tlbsync								; Wait for everyone to catch up

			ptesync								; Make sure of it all
			li		r0,0						; Clear this
			rlwinm	r2,r21,28,29,31				; Get slot number (16 byte entries)
			stw		r0,tlbieLock(0)				; Clear the tlbie lock
			oris	r0,r0,0x8000				; Assume slot 0

			srw		r0,r0,r2					; Get slot mask to deallocate

			lwz		r22,12(r26)					; Get the latest reference and change bits
			or		r6,r6,r0					; Make the guy we killed free

hrmUlckPCA64:
			eieio								; Make sure all updates come first

			stw		r6,0(r7)					; Unlock and change the PCA

hrmPysDQ64:	mr		r3,r31						; Point to the mapping
			bl		mapDrainBusy				; Go wait until mapping is unused

			mr		r3,r28						; Get the pmap to remove from
			mr		r4,r31						; Point to the mapping
			bl		EXT(mapRemove)				; Remove the mapping from the list

			rlwinm	r0,r20,0,mpType				; Isolate mapping type
			cmplwi	cr1,r0,mpMinSpecial			; cr1_lt <- not a special mapping type
			lwz		r4,pmapResidentCnt(r28)		; Get the mapped page count
			la		r3,pmapSXlk(r28)			; Point to the pmap search lock
			subi	r4,r4,1						; Drop down the mapped page count
			stw		r4,pmapResidentCnt(r28)		; Set the mapped page count
			bl		sxlkUnlock					; Unlock the search list

			bf--	cr1_lt,hrmRetn64			; This one has no real memory associated with it so we are done...

			bl		mapPhysFindLock				; Go find and lock the physent

			li		r0,ppLFAmask				; Get mask to clean up mapping pointer
			ld		r9,ppLink(r3)				; Get first mapping
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
			mr		r4,r22						; Get the RC bits we just got

			bl		mapPhysMerge				; Go merge the RC bits

			andc	r9,r9,r0					; Clean up the mapping pointer

			cmpld	r9,r31						; Are we the first on the list?
			bne--	hrmNot1st64					; Nope...

			li		r9,0						; Get a 0
			ld		r4,mpAlias(r31)				; Get our forward pointer

			std		r9,mpAlias(r31)				; Make sure we are off the chain
			bl		mapPhyCSet64				; Go set the physent link and preserve flags

			b		hrmPhyDQd64					; Join up and unlock it all...

hrmPtlb64w:	li		r5,lgKillResv				; Point to some spare memory
			stwcx.	r5,0,r5						; Clear the pending reservation


hrmPtlb64x:	lwz		r5,0(r9)					; Do a regular load to avoid taking reservation
			mr.		r5,r5						; Is it locked?
			beq++	hrmPtlb64					; Nope...
			b		hrmPtlb64x					; Sniff some more...

			.align	5

hrmNot1st64:
			mr.		r8,r9						; Remember and test current node
			beq--	hrmPhyDQd64					; Could not find our node...
			ld		r9,mpAlias(r9)				; Chain to the next
			cmpld	r9,r31						; Is this us?
			bne--	hrmNot1st64					; Not us...

			ld		r9,mpAlias(r9)				; Get our forward pointer
			std		r9,mpAlias(r8)				; Unchain us

			nop									; For alignment

hrmPhyDQd64:
			bl		mapPhysUnlock				; Unlock the physent chain

hrmRetn64:	rldicr	r8,r31,0,51					; Find start of page
			mr		r3,r31						; Copy the pointer to the mapping
			lwz		r8,mbvrswap+4(r8)			; Get last half of virtual to real swap
			bl		mapDrainBusy				; Go wait until mapping is unused

			xor		r3,r31,r8					; Flip mapping address to virtual

			mtmsrd	r17							; Restore enables/translation/etc.
			isync

			b		hrmRetnCmn					; Join the common return path...

55e303ae
A
1229;
1230; Check hrmBlock32 for comments.
1231;
1c79356b 1232
de355530 1233 .align 5
55e303ae 1234
3a60a9f5
A
1235hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1236 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
55e303ae
A
1237 lhz r24,mpSpace(r31) ; Get the address space hash
1238 lhz r25,mpBSize(r31) ; Get the number of pages in block
1239 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
3a60a9f5
A
1240 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1241 addi r25,r25,1 ; Account for zero-based counting
55e303ae 1242 ori r0,r20,mpRIP ; Turn on the remove in progress flag
3a60a9f5 1243 slw r25,r25,r29 ; Adjust for 32MB if needed
55e303ae
A
1244 mfsdr1 r29 ; Get the hash table base and size
1245 ld r27,mpVAddr(r31) ; Get the base vaddr
3a60a9f5 1246 subi r25,r25,1 ; Convert back to zero-based counting
55e303ae
A
1247 rlwinm r5,r29,0,27,31 ; Isolate the size
1248 sub r4,r25,r9 ; Get number of pages left
1249 cmplw cr1,r9,r25 ; Have we already hit the end?
1250 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1251 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1252 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1253 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1254 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1255 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1256 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1257 srdi r27,r27,12 ; Change address into page index
1258 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1259 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1260
1261 bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...
1262
1263 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1264 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1265 bl sxlkUnlock ; Unlock the search list while we are invalidating
1266
1267 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1268 eqv r26,r26,r26 ; Get all foxes here
1269 rldimi r24,r24,28,8 ; Make a couple copies up higher
1270 rldicr r29,r29,0,47 ; Isolate just the hash table base
1271 subfic r5,r5,46 ; Get number of leading zeros
1272 srd r26,r26,r5 ; Shift the size bits over
1273 mr r30,r27 ; Get start of chunk to invalidate
1274 rldicr r26,r26,0,56 ; Make length in PTEG units
1275 add r22,r4,r30 ; Get end page number
1276
1277hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1278 rldicr r0,r0,0,49 ; Clean all but segment portion
1279 rlwinm r2,r30,0,16,31 ; Get the current page index
1280 xor r0,r0,r24 ; Form VSID
1281 xor r8,r2,r0 ; Hash the vaddr
1282 sldi r8,r8,7 ; Make into PTEG offset
1283 and r23,r8,r26 ; Wrap into the hash table
1284 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1285 subfic r3,r3,-4 ; Get the PCA entry offset
1286 add r7,r3,r29 ; Point to the PCA slot
1287
1288 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1289
1290 bl mapLockPteg ; Lock the PTEG
1291
1292 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1293 add r5,r23,r29 ; Point to the PTEG
1294 li r0,0 ; Set an invalid PTE value
1295 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1296 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1297 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1c79356b 1298
1c79356b 1299
55e303ae
A
1300 bf 0,hrmSlot0s ; No autogen here
1301 std r0,0x00(r5) ; Invalidate PTE
1c79356b 1302
55e303ae
A
1303hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1304 std r0,0x10(r5) ; Invalidate PTE
1c79356b 1305
55e303ae
A
1306hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1307 std r0,0x20(r5) ; Invalidate PTE
d7e50217 1308
55e303ae
A
1309hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1310 std r0,0x30(r5) ; Invalidate PTE
d7e50217 1311
55e303ae
A
1312hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1313 std r0,0x40(r5) ; Invalidate PTE
d7e50217 1314
55e303ae
A
1315hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1316 std r0,0x50(r5) ; Invalidate PTE
d7e50217 1317
55e303ae
A
1318hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1319 std r0,0x60(r5) ; Invalidate PTE
d7e50217 1320
55e303ae
A
1321hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1322 std r0,0x70(r5) ; Invalidate PTE
d7e50217 1323
55e303ae
A
1324hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
1325 or r6,r6,r4 ; Flip on the free bits that corrospond to the autogens we cleared
1326 andc r6,r6,r0 ; Turn off all the old autogen bits
1327
1328hrmBNone64: eieio ; Make sure all updates come first
1329 stw r6,0(r7) ; Unlock and set the PCA
1330
1331 addi r30,r30,1 ; bump to the next PTEG
1332 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1333
1334 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1335
1336 mr r3,r31 ; Copy the pointer to the mapping
1337 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1338
1339 sync ; Make sure memory is consistent
1340
1341 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1342 li r6,255 ; Assume full invalidate for now
1343 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1344 andc r6,r6,r5 ; Clear max if we have less to do
1345 and r5,r25,r5 ; Clear count if we have more than max
1346 sldi r24,r24,28 ; Get the full XOR value over to segment position
1347 ld r27,mpVAddr(r31) ; Get the base vaddr
1348 li r7,tlbieLock ; Get the TLBIE lock
1349 or r5,r5,r6 ; Get number of TLBIEs needed
1c79356b 1350
55e303ae
A
1351hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1352 mr. r2,r2 ; Is it locked?
1353 li r2,1 ; Get our lock value
1354 bne-- hrmBTLBlcm ; It is locked, go wait...
1355 stwcx. r2,0,r7 ; Try to get it
1356 bne-- hrmBTLBlcl ; We was beat...
1357
1358hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1359 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1360 addic. r5,r5,-1 ; See if we did them all
1361 xor r2,r2,r24 ; Make the VSID
1362 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1363 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1c79356b 1364
55e303ae
A
1365 tlbie r2 ; Invalidate it everywhere
1366 addi r27,r27,0x1000 ; Up to the next page
1367 bge++ hrmBTLBj ; Make sure we have done it all...
1c79356b 1368
55e303ae
A
1369 eieio ; Make sure that the tlbie happens first
1370 tlbsync ; wait for everyone to catch up
1c79356b 1371
55e303ae 1372 li r2,0 ; Lock clear value
d7e50217 1373
55e303ae 1374 ptesync ; Wait for quiet again
55e303ae
A
1375
1376 stw r2,tlbieLock(0) ; Clear the tlbie lock
1377
1378 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1379 bl sxlkShared ; Go get a shared lock on the mapping lists
1380 mr. r3,r3 ; Did we get the lock?
1381 bne- hrmPanic ; Nope...
1382
1383 lwz r4,mpVAddr(r31) ; High order of address
1384 lwz r5,mpVAddr+4(r31) ; Low order of address
1385 mr r3,r28 ; Pass in pmap to search
1386 mr r29,r4 ; Save this in case we need it (only promote fails)
1387 mr r30,r5 ; Save this in case we need it (only promote fails)
1388 bl EXT(mapSearchFull) ; Go see if we can find it
1389
1390 mr. r3,r3 ; Did we? (And remember mapping address for later)
1391 mr r15,r4 ; Save top of next vaddr
1392 mr r16,r5 ; Save bottom of next vaddr
1393 beq- hrmPanic ; Nope, not found...
1394
1395 cmpld r3,r31 ; Same mapping?
1396 bne- hrmPanic ; Not good...
1397
1398 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1399 bl sxlkPromote ; Try to promote shared to exclusive
1400 mr. r3,r3 ; Could we?
1401 mr r3,r31 ; Restore the mapping pointer
1402 beq+ hrmBDone2 ; Yeah...
1403
1404 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1405 bl sxlkConvert ; Convert shared to exclusive
1406 mr. r3,r3 ; Could we?
1407 bne-- hrmPanic ; Nope, we must have timed out...
1408
1409 mr r3,r28 ; Pass in pmap to search
1410 mr r4,r29 ; High order of address
1411 mr r5,r30 ; Low order of address
1412 bl EXT(mapSearchFull) ; Rescan the list
1413
1414 mr. r3,r3 ; Did we lose it when we converted?
1415 mr r15,r4 ; Save top of next vaddr
1416 mr r16,r5 ; Save bottom of next vaddr
1417 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1418
1419hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1420
1421 mr r3,r28 ; Get the pmap to remove from
1422 mr r4,r31 ; Point to the mapping
1423 bl EXT(mapRemove) ; Remove the mapping from the list
1424
1425 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1426 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1427 subi r4,r4,1 ; Drop down the mapped page count
1428 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1429 bl sxlkUnlock ; Unlock the search list
1430
1431 b hrmRetn64 ; We are all done, get out...
1432
1433hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1434 stwcx. r2,0,r2 ; Unreserve it
1435
1436hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1437 mr. r2,r2 ; Is it held?
1438 beq++ hrmBTLBlcl ; Nope...
1439 b hrmBTLBlcn ; Yeah...
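;
; hrmBTLBlcm/hrmBTLBlcn are the back half of a test-and-test-and-set lock:
; on contention we first store to a scratch line to kill our lwarx
; reservation, then spin with plain loads until the lock looks free, and
; only then retry the lwarx/stwcx. pair. A C sketch (illustrative):
;
;	static void lock_spin(volatile unsigned int *lock) {
;		for (;;) {
;			if (*lock == 0 &&                 /* looks free? try it */
;			    __sync_bool_compare_and_swap(lock, 0, 1))
;				return;                   /* stwcx. succeeded   */
;			while (*lock != 0)                /* hrmBTLBlcn: spin   */
;				;                         /* with plain loads   */
;		}
;	}
;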
1c79356b 1440
91447636
A
1441;
1442; Guest shadow assist -- mapping remove
1443;
1444; Method of operation:
1445; o Locate the VMM extension block and the host pmap
1446; o Obtain the host pmap's search lock exclusively
1447; o Locate the requested mapping in the shadow hash table,
1448; exit if not found
1449; o If connected, disconnect the PTE and gather R&C to physent
1450; o Locate and lock the physent
1451; o Remove mapping from physent's chain
1452; o Unlock physent
1453; o Unlock pmap's search lock
1454;
1455; Non-volatile registers on entry:
1456; r17: caller's msr image
1457; r19: sprg2 (feature flags)
1458; r28: guest pmap's physical address
1459; r29: high-order 32 bits of guest virtual address
1460; r30: low-order 32 bits of guest virtual address
1461;
1462; Non-volatile register usage:
1463; r26: VMM extension block's physical address
1464; r27: host pmap's physical address
1465; r28: guest pmap's physical address
1466; r29: physent's physical address
1467; r30: guest virtual address
1468; r31: guest mapping's physical address
1469;
1470 .align 5
1471hrmGuest:
1472 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1473 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1474 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1475 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1476 b hrmGStart ; Join common code
1477
1478hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1479 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1480 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1481
1482hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1483 bl sxlkExclusive ; Get lock exclusive
1484
1485 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1486
1487 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1488 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1489 srwi r11,r30,12 ; Form shadow hash:
1490 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1491 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1492 ; Form index offset from hash page number
1493 add r31,r31,r12 ; r31 <- hash page index entry
1494 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1495 mtctr r0 ; in this group
1496 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1497 lwz r31,4(r31) ; r31 <- hash page paddr
1498 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1499 ; r31 <- hash group paddr
1500
1501 addi r3,r3,1 ; Increment remove request count
1502 stw r3,vxsGrm(r26) ; Update remove request count
1503
1504 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1505 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1506 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1507 b hrmG32SrchLp ; Let the search begin!
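;
; The shadow hash above is simply spaceID ^ (vaddr >> 12); its high-order
; bits pick a hash page through the physical index at VMX_HPIDX_OFFSET,
; and lower bits pick a group of GV_SLOTS mapping slots within that page.
; A sketch (the GV_* constants come from assym.s; values not shown here):
;
;	#include <stdint.h>
;
;	static uint32_t gv_hash(uint32_t space, uint32_t vaddr) {
;		return space ^ (vaddr >> 12);  /* srwi r11,r30,12 ; xor r11,r9,r11 */
;	}
;
;	/* page  = hash_page_index[high bits of gv_hash(...)]           */
;	/* group = page + (low bits of gv_hash(...)) * group size       */
;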
1508
1509 .align 5
1510hrmG32SrchLp:
1511 mr r6,r3 ; r6 <- current mapping slot's flags
1512 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1513 mr r7,r4 ; r7 <- current mapping slot's space ID
1514 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1515 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1516 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1517 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1518 xor r7,r7,r9 ; Compare space ID
1519 or r0,r11,r7 ; r0 <- 0 iff !free && space match
1520 xor r8,r8,r30 ; Compare virtual address
1521 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1522 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1523
1524 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1525 bdnz hrmG32SrchLp ; Iterate
1526
1527 mr r6,r3 ; r6 <- current mapping slot's flags
1528 clrrwi r5,r5,12 ; Remove flags from virtual address
1529 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1530 xor r4,r4,r9 ; Compare space ID
1531 or r0,r11,r4 ; r0 <- 0 iff !free && space match
1532 xor r5,r5,r30 ; Compare virtual address
1533 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1534 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1535 b hrmGSrchMiss ; No joy in our hash group
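;
; Each slot is tested without a branch: the free flag, the XOR of the
; space IDs, and the XOR of the page-aligned vaddrs are ORed together, so
; the result is zero only for a live slot matching both keys. A C sketch;
; the mpgFree bit value is illustrative, and va is page-aligned (r30 was
; cleaned at entry):
;
;	#include <stdint.h>
;	#include <stdbool.h>
;
;	#define MPG_FREE 0x80000000u          /* stand-in for mpgFree */
;
;	static bool slot_matches(uint32_t flags, uint16_t slot_space,
;	                         uint64_t slot_va, uint16_t space, uint64_t va) {
;		uint64_t r = (flags & MPG_FREE)               /* rlwinm     */
;		           | (uint32_t)(slot_space ^ space)   /* xor        */
;		           | ((slot_va & ~0xFFFULL) ^ va);    /* clrrdi/xor */
;		return r == 0;              /* or. ... ; beq hrmGSrchHit */
;	}
;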
1536
1537hrmG64Search:
1538 ld r31,0(r31) ; r31 <- hash page paddr
1539 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1540 ; r31 <- hash group paddr
1541 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1542 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1543 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1544 b hrmG64SrchLp ; Let the search begin!
1545
1546 .align 5
1547hrmG64SrchLp:
1548 mr r6,r3 ; r6 <- current mapping slot's flags
1549 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1550 mr r7,r4 ; r7 <- current mapping slot's space ID
1551 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1552 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1553 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1554 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1555 xor r7,r7,r9 ; Compare space ID
1556 or r0,r11,r7 ; r0 <- 0 iff !free && space match
1557 xor r8,r8,r30 ; Compare virtual address
1558 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1559 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1560
1561 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1562 bdnz hrmG64SrchLp ; Iterate
1563
1564 mr r6,r3 ; r6 <- current mapping slot's flags
1565 clrrdi r5,r5,12 ; Remove flags from virtual address
1566 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1567 xor r4,r4,r9 ; Compare space ID
1568 or r0,r11,r4 ; r0 <- 0 iff !free && space match
1569 xor r5,r5,r30 ; Compare virtual address
1570 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1571 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1572hrmGSrchMiss:
1573 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1574 li r25,mapRtNotFnd ; Return not found
1575 addi r3,r3,1 ; Increment miss count
1576 stw r3,vxsGrmMiss(r26) ; Update miss count
1577 b hrmGReturn ; Join guest return
1578
1579 .align 5
1580hrmGSrchHit:
1581 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1582 bne hrmGDormant ; Yes, nothing to disconnect
1583
1584 lwz r3,vxsGrmActive(r26) ; Get active hit count
1585 addi r3,r3,1 ; Increment active hit count
1586 stw r3,vxsGrmActive(r26) ; Update hit count
1587
1588 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1589 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1590 ; r31 <- mapping's physical address
1591 ; r3 -> PTE slot physical address
1592 ; r4 -> High-order 32 bits of PTE
1593 ; r5 -> Low-order 32 bits of PTE
1594 ; r6 -> PCA
1595 ; r7 -> PCA physical address
1596 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1597 b hrmGFreePTE ; Join 64-bit path to release the PTE
1598hrmGDscon64:
1599 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1600 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1601hrmGFreePTE:
1602 mr. r3,r3 ; Was there a valid PTE?
1603 beq hrmGDormant ; No valid PTE, we're almost done
1604 lis r0,0x8000 ; Prepare free bit for this slot
1605 srw r0,r0,r2 ; Position free bit
1606 or r6,r6,r0 ; Set it in our PCA image
1607 lwz r8,mpPte(r31) ; Get PTE offset
1608 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1609 stw r8,mpPte(r31) ; Save invalidated PTE offset
1610 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1611 stw r6,0(r7) ; Update PCA and unlock the PTEG
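;
; Releasing a PTE slot means: set the slot's bit in the PCA free mask
; (bit 0 is slot 0), clear the mapping's cached-PTE valid flag, and let
; the final PCA store both publish the free bit and drop the PTEG lock.
; A sketch (the mpHValid encoding here is illustrative):
;
;	#include <stdint.h>
;
;	#define MP_HVALID 1u                  /* stand-in for mpHValid */
;
;	static void free_pte_slot(volatile uint32_t *pca, uint32_t pca_img,
;	                          unsigned slot, uint32_t *mp_pte) {
;		pca_img |= 0x80000000u >> slot;  /* lis/srw/or: free bit   */
;		*mp_pte &= ~MP_HVALID;           /* offset now invalid     */
;		__sync_synchronize();            /* eieio before unlock    */
;		*pca = pca_img;                  /* set PCA, drop PTEG lock */
;	}
;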
1612
1613hrmGDormant:
1614 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1615 bl mapFindLockPN ; Find 'n' lock this page's physent
1616 mr. r29,r3 ; Got lock on our physent?
1617 beq-- hrmGBadPLock ; No, time to bail out
1618
1619 crset cr1_eq ; cr1_eq <- previous link is the anchor
1620 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1621 la r11,ppLink+4(r29) ; Point to chain anchor
1622 lwz r9,ppLink+4(r29) ; Get chain anchor
1623 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1624hrmGRemLoop:
1625 beq- hrmGPEMissMiss ; End of chain, this is not good
1626 cmplw r9,r31 ; Is this the mapping to remove?
1627 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1628 bne hrmGRemNext ; No, chain onward
1629 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1630 stw r8,0(r11) ; Unchain gpv->phys mapping
1631 b hrmGDelete ; Finish deleting mapping
1632hrmGRemRetry:
1633 lwarx r0,0,r11 ; Get previous link
1634 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1635 stwcx. r0,0,r11 ; Update previous link
1636 bne- hrmGRemRetry ; Lost reservation, retry
1637 b hrmGDelete ; Finish deleting mapping
1638
1639hrmGRemNext:
1640 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1641 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1642 mr. r9,r8 ; Does next entry exist?
1643 b hrmGRemLoop ; Carry on
1644
1645hrmGRemove64:
1646 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1647 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1648 la r11,ppLink(r29) ; Point to chain anchor
1649 ld r9,ppLink(r29) ; Get chain anchor
1650 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1651hrmGRem64Lp:
1652 beq-- hrmGPEMissMiss ; End of chain, this is not good
1653 cmpld r9,r31 ; Is this the mapping to remove?
1654 ld r8,mpAlias(r9) ; Get forward chain pointer
1655 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1656 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1657 std r8,0(r11) ; Unchain gpv->phys mapping
1658 b hrmGDelete ; Finish deleting mapping
1659hrmGRem64Rt:
1660 ldarx r0,0,r11 ; Get previous link
1661 and r0,r0,r7 ; Get flags
1662 or r0,r0,r8 ; Insert new forward pointer
1663 stdcx. r0,0,r11 ; Slam it back in
1664 bne-- hrmGRem64Rt ; Lost reservation, retry
1665 b hrmGDelete ; Finish deleting mapping
1666
1667 .align 5
1668hrmGRem64Nxt:
1669 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1670 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1671 mr. r9,r8 ; Does next entry exist?
1672 b hrmGRem64Lp ; Carry on
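;
; Both unlink loops walk a singly linked chain rooted at the physent's
; ppLink, where only the anchor word carries flag bits; splicing at the
; anchor therefore needs a reservation so the flags survive, while a
; mid-chain splice is a plain store. A C sketch (flag mask illustrative):
;
;	#include <stdint.h>
;	#include <stddef.h>
;
;	#define PP_FLAGS 0xFULL               /* stand-in for the flag bits */
;
;	typedef struct mapping { uint64_t mpAlias; } mapping_t;
;
;	static int physent_unlink(volatile uint64_t *anchor, mapping_t *victim) {
;		uint64_t old = *anchor;
;		if ((mapping_t *)(uintptr_t)(old & ~PP_FLAGS) == victim) {
;			do {                      /* ldarx/stdcx.: keep flags */
;				old = *anchor;
;			} while (!__sync_bool_compare_and_swap(anchor, old,
;			                (old & PP_FLAGS) | victim->mpAlias));
;			return 0;
;		}
;		for (mapping_t *prev = (mapping_t *)(uintptr_t)(old & ~PP_FLAGS);
;		     prev != NULL;
;		     prev = (mapping_t *)(uintptr_t)prev->mpAlias)
;			if ((mapping_t *)(uintptr_t)prev->mpAlias == victim) {
;				prev->mpAlias = victim->mpAlias;  /* std */
;				return 0;
;			}
;		return -1;                        /* hrmGPEMissMiss */
;	}
;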
1673
1674hrmGDelete:
1675 mr r3,r29 ; r3 <- physent addr
1676 bl mapPhysUnlock ; Unlock physent chain
1677 lwz r3,mpFlags(r31) ; Get mapping's flags
1678 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1679 ori r3,r3,mpgFree ; Mark mapping free
1680 stw r3,mpFlags(r31) ; Update flags
1681 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1682
1683hrmGReturn:
1684 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1685 bl sxlkUnlock ; Release host pmap search lock
1686
1687 mr r3,r25 ; r3 <- return code
1688 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1689 mtmsr r17 ; Restore 'rupts, translation
1690 isync ; Throw a small wrench into the pipeline
1691 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1692hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1693 b hrmRetnCmn ; Join common return
1694
1695hrmGBadPLock:
1696hrmGPEMissMiss:
1697 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1698 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1699 li r3,failMapping ; All the way from New Orleans
1700 sc ; To Jerusalem
1c79356b
A
1701
1702
1703/*
55e303ae 1704 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1c79356b 1705 *
55e303ae 1706 * Upon entry, R3 contains a pointer to a physent.
1c79356b 1707 *
55e303ae
A
1708 * This function removes the first mapping from a physical entry
1709 * alias list. It locks the list, extracts the vaddr and pmap from
1710 * the first entry. It then jumps into the hw_rem_map function.
1711 * NOTE: since we jump into rem_map, we need to set up the stack
1712 * identically. Also, we set the next parm to 0 so we do not
1713 * try to save a next vaddr.
1714 *
1715 * We return the virtual address of the removed mapping in
1716 * R3.
de355530 1717 *
55e303ae 1718 * Note that this is designed to be called from 32-bit mode with a stack.
de355530 1719 *
55e303ae
A
1720 * We disable translation and all interruptions here. This keeps us
1721 * from having to worry about a deadlock due to having anything locked
1722 * and needing it to process a fault.
1c79356b 1723 *
55e303ae
A
1724 * Note that this must be done with both interruptions off and VM off
1725 *
1726 *
1727 * Remove mapping via physical page (mapping_purge)
1728 *
1729 * 1) lock physent
1730 * 2) extract vaddr and pmap
1731 * 3) unlock physent
1732 * 4) do "remove mapping via pmap"
1733 *
1c79356b 1734 *
1c79356b
A
1735 */
1736
1737 .align 5
55e303ae
A
1738 .globl EXT(hw_purge_phys)
1739
1740LEXT(hw_purge_phys)
1741 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1742 mflr r0 ; Save the link register
1743 stw r15,FM_ARG0+0x00(r1) ; Save a register
1744 stw r16,FM_ARG0+0x04(r1) ; Save a register
1745 stw r17,FM_ARG0+0x08(r1) ; Save a register
1746 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1747 stw r19,FM_ARG0+0x10(r1) ; Save a register
1748 stw r20,FM_ARG0+0x14(r1) ; Save a register
1749 stw r21,FM_ARG0+0x18(r1) ; Save a register
1750 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1751 stw r23,FM_ARG0+0x20(r1) ; Save a register
1752 stw r24,FM_ARG0+0x24(r1) ; Save a register
1753 stw r25,FM_ARG0+0x28(r1) ; Save a register
1754 li r6,0 ; Set no next address return
1755 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1756 stw r27,FM_ARG0+0x30(r1) ; Save a register
1757 stw r28,FM_ARG0+0x34(r1) ; Save a register
1758 stw r29,FM_ARG0+0x38(r1) ; Save a register
1759 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1760 stw r31,FM_ARG0+0x40(r1) ; Save a register
1761 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1762 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1763
1764 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1765
1766 bl mapPhysLock ; Lock the physent
1767
1768 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1769
1770 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
91447636 1771 li r0,ppFlags ; Set the bottom stuff to clear
55e303ae
A
1772 b hppJoin ; Join the common...
1773
91447636 1774hppSF: li r0,ppLFAmask
55e303ae 1775 ld r12,ppLink(r3) ; Get the pointer to the first mapping
91447636 1776 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
1777
1778hppJoin: andc. r12,r12,r0 ; Clean and test link
1779 beq-- hppNone ; There are no more mappings on physical page
1780
1781 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1782 lhz r7,mpSpace(r12) ; Get the address space hash
1783 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1784 slwi r0,r7,2 ; Multiply space by 4
1785 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1786 slwi r7,r7,3 ; Multiply space by 8
1787 lwz r5,mpVAddr+4(r12) ; and the bottom
1788 add r7,r7,r0 ; Get correct displacement into translate table
1789 lwz r28,0(r28) ; Get the actual translation map
de355530 1790
55e303ae
A
1791 add r28,r28,r7 ; Point to the pmap translation
1792
1793 bl mapPhysUnlock ; Time to unlock the physical entry
1794
1795 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1796
1797 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1798 b hrmJoin ; Go remove the mapping...
1799
1800hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1801 b hrmJoin ; Go remove the mapping...
d7e50217 1802
de355530 1803 .align 5
55e303ae
A
1804
1805hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1806
1807 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1808
1809 mtmsr r11 ; Restore enables/translation/etc.
1810 isync
1811 b hppRetnCmn ; Join the common return code...
1c79356b 1812
55e303ae
A
1813hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1814 isync
1c79356b 1815
55e303ae
A
1816;
1817; NOTE: we have not used any registers other than the volatiles to this point
1818;
1c79356b 1819
55e303ae 1820hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1c79356b 1821
91447636 1822 li r3,mapRtEmpty ; Physent chain is empty
55e303ae
A
1823 mtlr r12 ; Restore the return
1824 lwz r1,0(r1) ; Pop the stack
1825 blr ; Leave...
1c79356b
A
1826
1827/*
55e303ae
A
1828 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1829 *
1830 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1831 * a 64-bit quantity, it is a long long so it is in R4 and R5.
1832 *
1833 * We return the virtual address of the removed mapping in
1834 * R3.
1835 *
1836 * Note that this is designed to be called from 32-bit mode with a stack.
1837 *
1838 * We disable translation and all interruptions here. This keeps us
1839 * from having to worry about a deadlock due to having anything locked
1840 * and needing it to process a fault.
1841 *
1842 * Note that this must be done with both interruptions off and VM off
1843 *
1844 * Remove a mapping which can be reestablished by VM
1845 *
1c79356b 1846 */
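;
; As the header above describes it, a 32-bit C caller sees roughly this
; signature (struct tags here are illustrative); the 64-bit vaddr is
; split across r4:r5, and the next-vaddr out-parameter arrives in r6:
;
;	typedef unsigned long long addr64_t;
;	struct pmap;
;	struct mapping;
;
;	extern struct mapping *hw_purge_map(struct pmap *pmap, addr64_t va,
;	                                    addr64_t *next);
;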
1c79356b 1847
55e303ae
A
1848 .align 5
1849 .globl EXT(hw_purge_map)
1850
1851LEXT(hw_purge_map)
1852 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1853 mflr r0 ; Save the link register
1854 stw r15,FM_ARG0+0x00(r1) ; Save a register
1855 stw r16,FM_ARG0+0x04(r1) ; Save a register
1856 stw r17,FM_ARG0+0x08(r1) ; Save a register
1857 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1858 stw r19,FM_ARG0+0x10(r1) ; Save a register
1859 mfsprg r19,2 ; Get feature flags
1860 stw r20,FM_ARG0+0x14(r1) ; Save a register
1861 stw r21,FM_ARG0+0x18(r1) ; Save a register
1862 mtcrf 0x02,r19 ; move pf64Bit cr6
1863 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1864 stw r23,FM_ARG0+0x20(r1) ; Save a register
1865 stw r24,FM_ARG0+0x24(r1) ; Save a register
1866 stw r25,FM_ARG0+0x28(r1) ; Save a register
1867 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1868 stw r27,FM_ARG0+0x30(r1) ; Save a register
1869 stw r28,FM_ARG0+0x34(r1) ; Save a register
1870 stw r29,FM_ARG0+0x38(r1) ; Save a register
1871 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1872 stw r31,FM_ARG0+0x40(r1) ; Save a register
1873 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1874 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1875
91447636
A
1876#if DEBUG
1877 lwz r11,pmapFlags(r3) ; Get pmaps flags
1878 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1879 bne hpmPanic ; Call not valid for guest shadow assist pmap
1880#endif
1881
55e303ae
A
1882 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1883 lwz r9,pmapvr+4(r3) ; Get conversion mask
1884 b hpmSF1x ; Done...
1885
1886hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1887
1888hpmSF1x:
1889 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1890
1891 xor r28,r3,r9 ; Convert the pmap to physical addressing
1892
1893 mr r17,r11 ; Save the MSR
1894
1895 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1896 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1897 mr. r3,r3 ; Did we get the lock?
1898 bne-- hrmBadLock ; Nope...
1899;
1900; Note that we do a full search (i.e., no shortcut level skips, etc.)
1901; here so that we will know the previous elements so we can dequeue them
1902; later.
1903;
1904hpmSearch:
1905 mr r3,r28 ; Pass in pmap to search
1906 mr r29,r4 ; Top half of vaddr
1907 mr r30,r5 ; Bottom half of vaddr
1908 bl EXT(mapSearchFull) ; Rescan the list
1909 mr. r31,r3 ; Did we? (And remember mapping address for later)
1910 or r0,r4,r5 ; Are we beyond the end?
1911 mr r15,r4 ; Save top of next vaddr
1912 cmplwi cr1,r0,0 ; See if there is another
1913 mr r16,r5 ; Save bottom of next vaddr
1914 bne-- hpmGotOne ; We found one, go check it out...
1915
1916hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1917 b hrmNotFound ; No more in pmap to check...
1918
1919hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
91447636 1920 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
ab86ba33
A
1921 rlwinm r21,r20,8,24,31 ; Extract the busy count
1922 cmplwi cr2,r21,0 ; Is it busy?
1923 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
55e303ae
A
1924 beq++ hrmGotX ; Found, branch to remove the mapping...
1925 b hpmCNext ; Nope...
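;
; A found mapping is only removable when it is a normal, non-permanent
; mapping with a zero busy count; the busy count lives in the high byte
; of mpFlags (that is what the rlwinm above extracts). A sketch with
; illustrative bit values:
;
;	#include <stdint.h>
;	#include <stdbool.h>
;
;	#define MP_TYPE 0x0000000Fu           /* stand-in for mpType */
;	#define MP_PERM 0x00000010u           /* stand-in for mpPerm */
;
;	static bool removable(uint32_t mpFlags) {
;		bool plain    = (mpFlags & (MP_TYPE | MP_PERM)) == 0;  /* andi.  */
;		uint32_t busy = mpFlags >> 24;                         /* rlwinm */
;		return plain && busy == 0;                             /* crand  */
;	}
;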
1c79356b 1926
91447636
A
1927hpmPanic: lis r0,hi16(Choke) ; System abend
1928 ori r0,r0,lo16(Choke) ; System abend
1929 li r3,failMapping ; Show that we failed some kind of mapping thing
1930 sc
1931
55e303ae
A
1932/*
1933 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1934 *
1935 * Upon entry, R3 contains a pointer to the physent and
1936 * R4 contains a pointer to the pmap.
1937 *
1938 * This function removes the first mapping for a specific pmap from a physical entry
1939 * alias list. It locks the list, extracts the vaddr and pmap from
1940 * the first appropriate entry. It then jumps into the hw_rem_map function.
1941 * NOTE: since we jump into rem_map, we need to set up the stack
1942 * identically. Also, we set the next parm to 0 so we do not
1943 * try to save a next vaddr.
1944 *
1945 * We return the virtual address of the removed mapping in
1946 * R3.
1947 *
1948 * Note that this is designed to be called from 32-bit mode with a stack.
1949 *
1950 * We disable translation and all interruptions here. This keeps us
1951 * from having to worry about a deadlock due to having anything locked
1952 * and needing it to process a fault.
1953 *
1954 * Note that this must be done with both interruptions off and VM off
1955 *
1956 *
1957 * Remove mapping via physical page (mapping_purge)
1958 *
1959 * 1) lock physent
1960 * 2) extract vaddr and pmap
1961 * 3) unlock physent
1962 * 4) do "remove mapping via pmap"
1963 *
1964 *
1965 */
1c79356b 1966
55e303ae
A
1967 .align 5
1968 .globl EXT(hw_purge_space)
1969
1970LEXT(hw_purge_space)
1971 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1972 mflr r0 ; Save the link register
1973 stw r15,FM_ARG0+0x00(r1) ; Save a register
1974 stw r16,FM_ARG0+0x04(r1) ; Save a register
1975 stw r17,FM_ARG0+0x08(r1) ; Save a register
1976 mfsprg r2,2 ; Get feature flags
1977 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1978 stw r19,FM_ARG0+0x10(r1) ; Save a register
1979 stw r20,FM_ARG0+0x14(r1) ; Save a register
1980 stw r21,FM_ARG0+0x18(r1) ; Save a register
1981 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1982 mtcrf 0x02,r2 ; move pf64Bit cr6
1983 stw r23,FM_ARG0+0x20(r1) ; Save a register
1984 stw r24,FM_ARG0+0x24(r1) ; Save a register
1985 stw r25,FM_ARG0+0x28(r1) ; Save a register
1986 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1987 stw r27,FM_ARG0+0x30(r1) ; Save a register
1988 li r6,0 ; Set no next address return
1989 stw r28,FM_ARG0+0x34(r1) ; Save a register
1990 stw r29,FM_ARG0+0x38(r1) ; Save a register
1991 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1992 stw r31,FM_ARG0+0x40(r1) ; Save a register
1993 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1994 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1995
91447636
A
1996#if DEBUG
1997 lwz r11,pmapFlags(r4) ; Get pmaps flags
1998 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1999 bne hpsPanic ; Call not valid for guest shadow assist pmap
2000#endif
2001
55e303ae
A
2002 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
2003
2004 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
2005
2006 b hpsSF1x ; Done...
2007
2008hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2009
2010hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2011
2012 xor r4,r4,r9 ; Convert the pmap to physical addressing
2013
2014 bl mapPhysLock ; Lock the physent
2015
2016 lwz r8,pmapSpace(r4) ; Get the space hash
2017
2018 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2019
2020 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2021
91447636 2022hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
55e303ae
A
2023 beq hpsNone ; Did not find one...
2024
2025 lhz r10,mpSpace(r12) ; Get the space
2026
2027 cmplw r10,r8 ; Is this one of ours?
2028 beq hpsFnd ; Yes...
2029
2030 lwz r12,mpAlias+4(r12) ; Chain on to the next
2031 b hpsSrc32 ; Check it out...
1c79356b 2032
55e303ae
A
2033 .align 5
2034
91447636 2035hpsSF: li r0,ppLFAmask
55e303ae 2036 ld r12,ppLink(r3) ; Get the pointer to the first mapping
91447636 2037 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
2038
2039hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2040 beq hpsNone ; Did not find one...
2041
2042 lhz r10,mpSpace(r12) ; Get the space
2043
2044 cmplw r10,r8 ; Is this one of ours?
2045 beq hpsFnd ; Yes...
2046
2047 ld r12,mpAlias(r12) ; Chain on to the next
2048 b hpsSrc64 ; Check it out...
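;
; Both scan loops above reduce to a linked-list search keyed on the
; mapping's space ID. A C sketch (field names follow the comments above;
; the flag mask is illustrative):
;
;	#include <stdint.h>
;	#include <stddef.h>
;
;	#define PP_FLAGS 0xFUL
;
;	typedef struct mapping {
;		uint16_t mpSpace;             /* address space hash   */
;		struct mapping *mpAlias;      /* next on this physent */
;	} mapping_t;
;
;	static mapping_t *find_space(uintptr_t pplink, uint16_t space) {
;		mapping_t *m = (mapping_t *)(pplink & ~PP_FLAGS);
;		for (; m != NULL; m = m->mpAlias)   /* hpsSrc32/64 */
;			if (m->mpSpace == space)
;				return m;           /* hpsFnd  */
;		return NULL;                        /* hpsNone */
;	}
;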
2049
2050 .align 5
1c79356b 2051
55e303ae
A
2052hpsFnd: mr r28,r4 ; Set the pmap physical address
2053 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2054 lwz r5,mpVAddr+4(r12) ; and the bottom
2055
2056 bl mapPhysUnlock ; Time to unlock the physical entry
2057 b hrmJoin ; Go remove the mapping...
2058
2059 .align 5
2060
2061hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
1c79356b 2062
55e303ae 2063 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
1c79356b 2064
55e303ae
A
2065 mtmsr r11 ; Restore enables/translation/etc.
2066 isync
2067 b hpsRetnCmn ; Join the common return code...
1c79356b 2068
55e303ae
A
2069hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2070 isync
1c79356b 2071
55e303ae
A
2072;
2073; NOTE: we have not used any registers other than the volatiles to this point
2074;
d7e50217 2075
55e303ae
A
2076hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2077
91447636 2078 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
55e303ae
A
2079 mtlr r12 ; Restore the return
2080 lwz r1,0(r1) ; Pop the stack
2081 blr ; Leave...
1c79356b 2082
91447636
A
2083hpsPanic: lis r0,hi16(Choke) ; System abend
2084 ori r0,r0,lo16(Choke) ; System abend
2085 li r3,failMapping ; Show that we failed some kind of mapping thing
2086 sc
2087
2088/*
2089 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2090 * on this physent chain
2091 *
2092 * Locates the first guest mapping on the physent chain that is associated with the
2093 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2094 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2095 * repeatedly until no additional guest mappings that match our criteria are removed.
2096 *
2097 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2098 *
2099 * Parameters:
2100 * r3 : physent, 32-bit kernel virtual address
2101 * r4 : host pmap, 32-bit kernel virtual address
2102 *
2103 * Volatile register usage (for linkage through hrmJoin):
2104 * r4 : high-order 32 bits of guest virtual address
2105 * r5 : low-order 32 bits of guest virtual address
2106 * r11: saved MSR image
2107 *
2108 * Non-volatile register usage:
2109 * r26: VMM extension block's physical address
2110 * r27: host pmap's physical address
2111 * r28: guest pmap's physical address
2112 *
2113 */
2114
2115 .align 5
2116 .globl EXT(hw_scrub_guest)
2117
2118LEXT(hw_scrub_guest)
2119 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2120 mflr r0 ; Save the link register
2121 stw r15,FM_ARG0+0x00(r1) ; Save a register
2122 stw r16,FM_ARG0+0x04(r1) ; Save a register
2123 stw r17,FM_ARG0+0x08(r1) ; Save a register
2124 mfsprg r2,2 ; Get feature flags
2125 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2126 stw r19,FM_ARG0+0x10(r1) ; Save a register
2127 stw r20,FM_ARG0+0x14(r1) ; Save a register
2128 stw r21,FM_ARG0+0x18(r1) ; Save a register
2129 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2130 mtcrf 0x02,r2 ; move pf64Bit cr6
2131 stw r23,FM_ARG0+0x20(r1) ; Save a register
2132 stw r24,FM_ARG0+0x24(r1) ; Save a register
2133 stw r25,FM_ARG0+0x28(r1) ; Save a register
2134 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2135 stw r27,FM_ARG0+0x30(r1) ; Save a register
2136 li r6,0 ; Set no next address return
2137 stw r28,FM_ARG0+0x34(r1) ; Save a register
2138 stw r29,FM_ARG0+0x38(r1) ; Save a register
2139 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2140 stw r31,FM_ARG0+0x40(r1) ; Save a register
2141 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2142 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2143
2144 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2145
2146 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2147 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2148 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2149 b hsgStart ; Get to work
2150
2151hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2152 ld r9,pmapvr+4(r4) ; Get 64-bit virt<->real conversion salt
2153
2154hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2155 xor r27,r4,r9 ; Convert host pmap_t virt->real
2156 bl mapPhysLock ; Lock the physent
2157
2158 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2159
2160 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2161hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2162 beq hsg32Miss ; Did not find one...
2163 lwz r8,mpFlags(r12) ; Get mapping's flags
2164 lhz r7,mpSpace(r12) ; Get mapping's space id
2165 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2166 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2167 xori r8,r8,mpGuest ; Is it a guest mapping?
2168 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2169 slwi r9,r7,2 ; Multiply space by 4
2170 lwz r28,0(r28) ; Get the actual translation map
2171 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2172 slwi r7,r7,3 ; Multiply space by 8
2173 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2174 add r7,r7,r9 ; Get correct displacement into translate table
2175 add r28,r28,r7 ; Point to the pmap translation
2176 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2177 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2178 xor r7,r7,r26 ; Is guest associated with specified host?
2179 or. r7,r7,r8 ; Guest mapping && associated with host?
2180 lwz r12,mpAlias+4(r12) ; Chain on to the next
2181 bne hsg32Loop ; Try next mapping on alias chain
2182
2183hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2184 b hrmJoin ; Join common path for mapping removal
2185
2186 .align 5
2187hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2188 mtmsr r11 ; Restore 'rupts, translation
2189 isync ; Throw a small wrench into the pipeline
2190 li r3,mapRtEmpty ; No mappings found matching specified criteria
2191 b hrmRetnCmn ; Exit through common epilog
2192
2193 .align 5
2194hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2195 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2196 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2197hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2198 beq hsg64Miss ; Did not find one...
2199 lwz r8,mpFlags(r12) ; Get mapping's flags
2200 lhz r7,mpSpace(r12) ; Get mapping's space id
2201 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2202 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2203 xori r8,r8,mpGuest ; Is it a guest mapping?
2204 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2205 slwi r9,r7,2 ; Multiply space by 4
2206 lwz r28,0(r28) ; Get the actual translation map
2207 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2208 slwi r7,r7,3 ; Multiply space by 8
2209 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2210 add r7,r7,r9 ; Get correct displacement into translate table
2211 add r28,r28,r7 ; Point to the pmap translation
2212 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2213 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2214 xor r7,r7,r26 ; Is guest associated with specified host?
2215 or. r7,r7,r8 ; Guest mapping && associated with host?
2216 ld r12,mpAlias(r12) ; Chain on to the next
2217 bne hsg64Loop ; Try next mapping on alias chain
2218
2219hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2220 b hrmJoin ; Join common path for mapping removal
2221
2222 .align 5
2223hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
b36670ce 2224 mtmsrd r11 ; Restore 'rupts, translation
91447636
A
2225 li r3,mapRtEmpty ; No mappings found matching specified criteria
2226 b hrmRetnCmn ; Exit through common epilog
2227
1c79356b
A
2228
2229/*
55e303ae
A
2230 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2231 *
2232 * Upon entry, R3 contains a pointer to a physent.
2233 * space is the space ID from the pmap in question
2234 *
2235 * We return the virtual address of the found mapping in
2236 * R3. Note that the mapping's busy count is bumped.
2237 *
2238 * Note that this is designed to be called from 32-bit mode with a stack.
2239 *
2240 * We disable translation and all interruptions here. This keeps us
2241 * from having to worry about a deadlock due to having anything locked
2242 * and needing it to process a fault.
2243 *
1c79356b
A
2244 */
2245
2246 .align 5
55e303ae
A
2247 .globl EXT(hw_find_space)
2248
2249LEXT(hw_find_space)
2250 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2251 mflr r0 ; Save the link register
2252 mr r8,r4 ; Remember the space
2253 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2254
2255 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1c79356b 2256
55e303ae 2257 bl mapPhysLock ; Lock the physent
1c79356b 2258
55e303ae
A
2259 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2260
2261 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
d7e50217 2262
91447636 2263hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
55e303ae
A
2264 beq hfsNone ; Did not find one...
2265
2266 lhz r10,mpSpace(r12) ; Get the space
2267
2268 cmplw r10,r8 ; Is this one of ours?
2269 beq hfsFnd ; Yes...
2270
2271 lwz r12,mpAlias+4(r12) ; Chain on to the next
2272 b hfsSrc32 ; Check it out...
1c79356b 2273
55e303ae
A
2274 .align 5
2275
91447636 2276hfsSF: li r0,ppLFAmask
55e303ae 2277 ld r12,ppLink(r3) ; Get the pointer to the first mapping
91447636 2278 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
2279
2280hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2281 beq hfsNone ; Did not find one...
2282
2283 lhz r10,mpSpace(r12) ; Get the space
2284
2285 cmplw r10,r8 ; Is this one of ours?
2286 beq hfsFnd ; Yes...
2287
2288 ld r12,mpAlias(r12) ; Chain on to the next
2289 b hfsSrc64 ; Check it out...
2290
2291 .align 5
2292
2293hfsFnd: mr r8,r3 ; Save the physent
2294 mr r3,r12 ; Point to the mapping
2295 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
1c79356b 2296
55e303ae
A
2297 mr r3,r8 ; Get back the physical entry
2298 li r7,0xFFF ; Get a page size mask
2299 bl mapPhysUnlock ; Time to unlock the physical entry
1c79356b 2300
55e303ae
A
2301 andc r3,r12,r7 ; Move the mapping back down to a page
2302 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2303 xor r12,r3,r12 ; Convert to virtual
2304 b hfsRet ; Time to return
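;
; Mappings come from dedicated mapping pages, and each page stores an
; XOR "swap" word; rounding the mapping's address down to its page and
; XORing with the low word of mbvrswap converts physical back to kernel
; virtual. A sketch (the mbvrswap offset/layout here is illustrative):
;
;	#include <stdint.h>
;
;	struct mappingblok { uint32_t pad; uint32_t mbvrswap_lo; };
;
;	static uint32_t map_phys_to_virt(uint32_t mapping_pa) {
;		struct mappingblok *pg =
;		    (struct mappingblok *)(uintptr_t)(mapping_pa & ~0xFFFu);
;		return mapping_pa ^ pg->mbvrswap_lo;  /* andc/lwz/xor */
;	}
;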
2305
2306 .align 5
2307
2308hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2309
2310hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
1c79356b 2311
55e303ae
A
2312 mtmsr r11 ; Restore enables/translation/etc.
2313 isync
2314 b hfsRetnCmn ; Join the common return code...
1c79356b 2315
55e303ae
A
2316hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2317 isync
1c79356b 2318
55e303ae
A
2319;
2320; NOTE: we have not used any registers other than the volatiles to this point
2321;
1c79356b 2322
55e303ae 2323hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
91447636
A
2324
2325#if DEBUG
2326 mr. r3,r3 ; Anything to return?
2327 beq hfsRetnNull ; Nope
2328 lwz r11,mpFlags(r3) ; Get mapping flags
2329 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2330 cmplwi r0,mpGuest ; Shadow guest mapping?
2331 beq hfsPanic ; Yup, kick the bucket
2332hfsRetnNull:
2333#endif
2334
55e303ae
A
2335 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2336
2337 mtlr r12 ; Restore the return
2338 lwz r1,0(r1) ; Pop the stack
2339 blr ; Leave...
1c79356b 2340
91447636
A
2341hfsPanic: lis r0,hi16(Choke) ; System abend
2342 ori r0,r0,lo16(Choke) ; System abend
2343 li r3,failMapping ; Show that we failed some kind of mapping thing
2344 sc
1c79356b 2345
55e303ae
A
2346;
2347; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2348; Returns 0 if not found or the virtual address of the mapping if
2349 ; it is. Also, the mapping's busy count is bumped.
2350;
2351 .align 5
2352 .globl EXT(hw_find_map)
1c79356b 2353
55e303ae
A
2354LEXT(hw_find_map)
2355 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2356 mflr r0 ; Save the link register
2357 stw r25,FM_ARG0+0x00(r1) ; Save a register
2358 stw r26,FM_ARG0+0x04(r1) ; Save a register
2359 mr r25,r6 ; Remember address of next va
2360 stw r27,FM_ARG0+0x08(r1) ; Save a register
2361 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2362 stw r29,FM_ARG0+0x10(r1) ; Save a register
2363 stw r30,FM_ARG0+0x14(r1) ; Save a register
2364 stw r31,FM_ARG0+0x18(r1) ; Save a register
2365 stw r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1c79356b 2366
91447636
A
2367#if DEBUG
2368 lwz r11,pmapFlags(r3) ; Get pmaps flags
2369 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2370 bne hfmPanic ; Call not valid for guest shadow assist pmap
2371#endif
2372
55e303ae
A
2373 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2374 lwz r7,pmapvr+4(r3) ; Get the second part
1c79356b 2375
1c79356b 2376
55e303ae
A
2377 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2378
2379 mr r27,r11 ; Remember the old MSR
2380 mr r26,r12 ; Remember the feature bits
9bccf70c 2381
55e303ae 2382 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 2383
55e303ae 2384 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
1c79356b 2385
55e303ae 2386 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
1c79356b 2387
55e303ae
A
2388hfmSF1: mr r29,r4 ; Save top half of vaddr
2389 mr r30,r5 ; Save the bottom half
2390
2391 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2392 bl sxlkShared ; Go get a shared lock on the mapping lists
2393 mr. r3,r3 ; Did we get the lock?
2394 bne-- hfmBadLock ; Nope...
1c79356b 2395
55e303ae
A
2396 mr r3,r28 ; get the pmap address
2397 mr r4,r29 ; Get bits 0:31 to look for
2398 mr r5,r30 ; Get bits 32:64
2399
2400 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
1c79356b 2401
55e303ae
A
2402 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2403 mr. r31,r3 ; Save the mapping if we found it
2404 cmplwi cr1,r0,0 ; Are we removing?
2405 mr r29,r4 ; Save next va high half
2406 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2407 mr r30,r5 ; Save next va low half
2408 li r6,0 ; Assume we did not find it
2409 li r26,0xFFF ; Get a mask to relocate to start of mapping page
1c79356b 2410
55e303ae 2411 bt-- cr0_eq,hfmNotFnd ; We did not find it...
1c79356b 2412
55e303ae 2413 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear
1c79356b 2414
55e303ae 2415 andc r4,r31,r26 ; Get back to the mapping page start
1c79356b 2416
55e303ae
A
2417; Note: we can treat 32- and 64-bit the same here. Because we are going from
2418; physical to virtual and we only do 32-bit virtual, we only need the low order
2419; word of the xor.
d7e50217 2420
55e303ae
A
2421 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2422 li r6,-1 ; Indicate we found it and it is not being removed
2423 xor r31,r31,r4 ; Flip to virtual
d7e50217 2424
55e303ae
A
2425hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2426 bl sxlkUnlock ; Unlock the search list
d7e50217 2427
55e303ae
A
2428 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2429 and r3,r3,r6 ; Clear if not found or removing
de355530 2430
55e303ae 2431hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
de355530 2432
55e303ae
A
2433 mtmsr r27 ; Restore enables/translation/etc.
2434 isync
2435 b hfmReturnC ; Join common...
2436
2437hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2438 isync
2439
2440hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2441 stw r30,4(r25) ; Save the bottom of the next va
2442 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2443 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2444 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2445 and r3,r3,r6 ; Clear return if the mapping is being removed
2446 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2447 mtlr r0 ; Restore the return
2448 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2449 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2450 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2451 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2452 lwz r1,0(r1) ; Pop the stack
2453 blr ; Leave...
2454
2455 .align 5
2456
2457hfmBadLock: li r3,1 ; Set lock time out error code
2458 b hfmReturn ; Leave....
1c79356b 2459
91447636
A
2460hfmPanic: lis r0,hi16(Choke) ; System abend
2461 ori r0,r0,lo16(Choke) ; System abend
2462 li r3,failMapping ; Show that we failed some kind of mapping thing
2463 sc
2464
2465
2466/*
2467 * void hw_clear_maps(void)
2468 *
2469 * Remove all mappings for all phys entries.
2470 *
2471 *
2472 */
2473
2474 .align 5
2475 .globl EXT(hw_clear_maps)
2476
2477LEXT(hw_clear_maps)
2478 mflr r10 ; Save the link register
2479 mfcr r9 ; Save the condition register
2480 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2481
2482 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2483 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2484
2485hcmNextRegion:
2486 lwz r3,mrPhysTab(r5) ; Get the actual table address
2487 lwz r0,mrStart(r5) ; Get start of table entry
2488 lwz r4,mrEnd(r5) ; Get end of table entry
2489 addi r5,r5,mrSize ; Point to the next regions
2490
2491 cmplwi r3,0 ; No more regions?
2492 beq-- hcmDone ; Leave...
2493
2494 sub r4,r4,r0 ; Calculate physical entry count
2495 addi r4,r4,1
2496 mtctr r4
2497
2498 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2499
2500
2501hcmNextPhys32:
2502 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2503 addi r3,r3,physEntrySize ; Next phys_entry
2504
2505hcmNextMap32:
3a60a9f5 2506 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
91447636
A
2507 beq hcmNoMap32 ; Did not find one...
2508
2509 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2510 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2511 stw r0,mpPte(r4) ; Get the quick pointer again
2512
2513 lwz r4,mpAlias+4(r4) ; Chain on to the next
2514 b hcmNextMap32 ; Check it out...
2515hcmNoMap32:
2516 bdnz hcmNextPhys32
2517 b hcmNextRegion
2518
2519
2520 .align 5
2521hcmNextPhys64:
2522 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2523 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2524 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2525 addi r3,r3,physEntrySize ; Next phys_entry
2526
2527hcmNextMap64:
2528 andc. r4,r4,r0 ; Clean and test mapping address
2529 beq hcmNoMap64 ; Did not find one...
2530
2531 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2532 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2533 stw r0,mpPte(r4) ; Get the quick pointer again
2534
2535 ld r4,mpAlias(r4) ; Chain on to the next
2536 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2537 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2538 b hcmNextMap64 ; Check it out...
2539hcmNoMap64:
2540 bdnz hcmNextPhys64
2541 b hcmNextRegion
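;
; In outline, hw_clear_maps visits every phys entry of every memory
; region and clears the cached-PTE valid bit on each mapping so later
; activity rebuilds the PTEs. A C sketch (field names illustrative;
; link-word flag bits are ignored here for brevity):
;
;	#include <stdint.h>
;	#include <stddef.h>
;
;	#define MP_HVALID 1u
;
;	typedef struct mapping { uint32_t mpPte; struct mapping *mpAlias; } mapping_t;
;	typedef struct physent { mapping_t *ppLink; } physent_t;
;	typedef struct region  { physent_t *tab; uint32_t start, end; } region_t;
;
;	static void clear_maps(const region_t *r) {
;		for (; r->tab != NULL; r++)       /* hcmNextRegion */
;			for (uint32_t i = 0; i <= r->end - r->start; i++)
;				for (mapping_t *m = r->tab[i].ppLink;
;				     m != NULL; m = m->mpAlias)
;					m->mpPte &= ~MP_HVALID;
;	}
;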
2542
2543
2544 .align 5
2545hcmDone:
2546 mtlr r10 ; Restore the return
2547 mtcr r9 ; Restore the condition register
2548 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2549hcmDone32:
2550 mtmsr r11 ; Restore translation/mode/etc.
2551 isync
2552 blr ; Leave...
2553
2554hcmDone64:
2555 mtmsrd r11 ; Restore translation/mode/etc.
2556 isync
2557 blr ; Leave...
2558
2559
1c79356b
A
2560
2561/*
91447636 2562 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
55e303ae
A
2563 * walks all mappings for a physical page and performs
2564 * specified operations on each.
1c79356b 2565 *
55e303ae
A
2566 * pp is unlocked physent
2567 * preop is the operation to perform on the physent before the walk. This would be
2568 * used to set cache attribute or protection
2569 * op is the operation to perform on each mapping during walk
2570 * postop is the operation to perform on the physent after the walk. This would be
2571 * used to set or reset the RC bits.
91447636
A
2572 * opmod modifies the action taken on any connected PTEs visited during
2573 * the mapping walk.
55e303ae
A
2574 *
2575 * We return the RC bits from before postop is run.
2576 *
2577 * Note that this is designed to be called from 32-bit mode with a stack.
1c79356b 2578 *
55e303ae
A
2579 * We disable translation and all interruptions here. This keeps us
2580 * from having to worry about a deadlock due to having anything locked
2581 * and needing it to process a fault.
d7e50217 2582 *
55e303ae
A
2583 * We lock the physent, execute preop, and then walk each mapping in turn.
2584 * If there is a PTE, it is invalidated and the RC merged into the physent.
2585 * Then we call the op function.
2586 * Then we revalidate the PTE.
2587 * Once all mappings are finished, we save the physent RC and call the
2588 * postop routine. Then we unlock the physent and return the RC.
2589 *
2590 *
1c79356b
A
2591 */
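;
; The walk is table-driven: preop, op, and postop are small opcodes that
; select fixed-size routines, and a nonzero "CR0_EQ clear" result from
; preop or op ends the walk early, skipping postop. A control-flow
; sketch in C (all types and helpers are illustrative):
;
;	#include <stdint.h>
;
;	typedef int (*hwp_fn)(void *pp, void *mapping, uint32_t parm);
;
;	static uint32_t walk_phys(void *pp, hwp_fn preop, hwp_fn op,
;	                          hwp_fn postop, uint32_t parm,
;	                          void *(*first)(void *), void *(*next)(void *),
;	                          uint32_t (*rc_bits)(void *)) {
;		uint32_t rc;
;		/* ... lock the physent ... */
;		if (preop(pp, 0, parm) != 0) goto early;
;		for (void *m = first(pp); m != 0; m = next(m)) {
;			/* invalidate PTE, merge RC into physent ... */
;			if (op(pp, m, parm) != 0) goto early;
;			/* ... revalidate the PTE */
;		}
;		rc = rc_bits(pp);     /* sample RC before postop runs */
;		postop(pp, 0, parm);
;		/* ... unlock the physent ... */
;		return rc;
;	early:
;		rc = rc_bits(pp);     /* early exit skips postop */
;		/* ... unlock the physent ... */
;		return rc;
;	}
;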
2592
1c79356b 2593 .align 5
55e303ae
A
2594 .globl EXT(hw_walk_phys)
2595
2596LEXT(hw_walk_phys)
91447636 2597 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
55e303ae 2598 mflr r0 ; Save the link register
91447636
A
2599 stw r24,FM_ARG0+0x00(r1) ; Save a register
2600 stw r25,FM_ARG0+0x04(r1) ; Save a register
2601 stw r26,FM_ARG0+0x08(r1) ; Save a register
2602 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2603 mr r24,r8 ; Save the parm
55e303ae 2604 mr r25,r7 ; Save the parm
91447636
A
2605 stw r28,FM_ARG0+0x10(r1) ; Save a register
2606 stw r29,FM_ARG0+0x14(r1) ; Save a register
2607 stw r30,FM_ARG0+0x18(r1) ; Save a register
2608 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2609 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
55e303ae
A
2610
2611 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
91447636
A
2612
2613 mfsprg r26,0 ; (INSTRUMENTATION)
2614 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2615 addi r27,r27,1 ; (INSTRUMENTATION)
2616 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2617 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2618 slwi r12,r24,2 ; (INSTRUMENTATION)
2619 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2620 addi r27,r27,1 ; (INSTRUMENTATION)
2621 stwx r27,r26,r12 ; (INSTRUMENTATION)
55e303ae
A
2622
2623 mr r26,r11 ; Save the old MSR
2624 lis r27,hi16(hwpOpBase) ; Get high order of op base
2625 slwi r4,r4,7 ; Convert preop to displacement
2626 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2627 slwi r5,r5,7 ; Convert op to displacement
2628 add r12,r4,r27 ; Point to the preop routine
2629 slwi r28,r6,7 ; Convert postop to displacement
2630 mtctr r12 ; Set preop routine
2631 add r28,r28,r27 ; Get the address of the postop routine
2632 add r27,r5,r27 ; Get the address of the op routine
1c79356b 2633
55e303ae 2634 bl mapPhysLock ; Lock the physent
1c79356b 2635
55e303ae
A
2636 mr r29,r3 ; Save the physent address
2637
2638 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2639
2640 bctrl ; Call preop routine
2641 bne- hwpEarly32 ; preop says to bail now...
91447636
A
2642
2643 cmplwi r24,hwpMergePTE ; Classify operation modifier
55e303ae
A
2644 mtctr r27 ; Set up the op function address
2645 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
91447636
A
2646 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2647 beq hwpMSrc32 ; Do TLB merge for each mapping
2648
3a60a9f5 2649hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
91447636 2650 beq hwpNone32 ; Did not find one...
55e303ae 2651
91447636
A
2652 bctrl ; Call the op function
2653
2654 bne- hwpEarly32 ; op says to bail now...
2655 lwz r31,mpAlias+4(r31) ; Chain on to the next
2656 b hwpQSrc32 ; Check it out...
2657
2658 .align 5
3a60a9f5 2659hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
55e303ae 2660 beq hwpNone32 ; Did not find one...
91447636
A
2661
2662 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2663 bctrl ; Call the op function
2664
2665 bne- hwpEarly32 ; op says to bail now...
2666 lwz r31,mpAlias+4(r31) ; Chain on to the next
2667 b hwpMSrc32 ; Check it out...
d7e50217 2668
91447636
A
2669 .align 5
2670hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2671 beq hwpNone32 ; Did not find one...
2672
55e303ae
A
2673;
2674; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2675 ; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2676; If there is no PTE, PTE low is obtained from mapping
2677;
2678 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2679
2680 bctrl ; Call the op function
2681
2682 crmove cr1_eq,cr0_eq ; Save the return code
2683
2684 mr. r3,r3 ; Was there a previously valid PTE?
2685 beq- hwpNxt32 ; Nope...
1c79356b 2686
55e303ae
A
2687 stw r5,4(r3) ; Store second half of PTE
2688 eieio ; Make sure we do not reorder
2689 stw r4,0(r3) ; Revalidate the PTE
2690
2691 eieio ; Make sure all updates come first
2692 stw r6,0(r7) ; Unlock the PCA
d7e50217 2693
55e303ae
A
2694hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2695 lwz r31,mpAlias+4(r31) ; Chain on to the next
2696 b hwpSrc32 ; Check it out...
1c79356b 2697
55e303ae 2698 .align 5
1c79356b 2699
55e303ae 2700hwpNone32: mtctr r28 ; Get the post routine address
1c79356b 2701
55e303ae
A
2702 lwz r30,ppLink+4(r29) ; Save the old RC
2703 mr r3,r29 ; Get the physent address
2704 bctrl ; Call post routine
1c79356b 2705
55e303ae
A
2706 bl mapPhysUnlock ; Unlock the physent
2707
2708 mtmsr r26 ; Restore translation/mode/etc.
2709 isync
1c79356b 2710
55e303ae 2711 b hwpReturn ; Go restore registers and return...
1c79356b 2712
55e303ae 2713 .align 5
1c79356b 2714
55e303ae
A
2715hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2716 mr r3,r29 ; Get the physent address
2717 bl mapPhysUnlock ; Unlock the physent
2718
2719 mtmsr r26 ; Restore translation/mode/etc.
2720 isync
2721
2722 b hwpReturn ; Go restore registers and return...
1c79356b 2723
55e303ae 2724 .align 5
1c79356b 2725
55e303ae
A
2726hwp64: bctrl ; Call preop routine
2727 bne-- hwpEarly64 ; preop says to bail now...
d7e50217 2728
91447636 2729 cmplwi r24,hwpMergePTE ; Classify operation modifier
55e303ae
A
2730 mtctr r27 ; Set up the op function address
2731
91447636 2732 li r24,ppLFAmask
55e303ae 2733 ld r31,ppLink(r3) ; Get the pointer to the first mapping
91447636
A
2734 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2735 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2736 beq hwpMSrc64 ; Do TLB merge for each mapping
55e303ae 2737
91447636
A
2738hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2739 beq hwpNone64 ; Did not find one...
2740
2741 bctrl ; Call the op function
2742
2743 bne-- hwpEarly64 ; op says to bail now...
2744 ld r31,mpAlias(r31) ; Chain on to the next
2745 b hwpQSrc64 ; Check it out...
2746
2747 .align 5
2748hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2749 beq hwpNone64 ; Did not find one...
2750
2751 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2752 bctrl ; Call the op function
2753
2754 bne-- hwpEarly64 ; op says to bail now...
2755 ld r31,mpAlias(r31) ; Chain on to the next
2756 b hwpMSrc64 ; Check it out...
2757
2758 .align 5
2759hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
55e303ae
A
2760 beq hwpNone64 ; Did not find one...
2761;
2762; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2763; PTE low in R5. PTEG comes back locked if there is one
2764;
2765 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
1c79356b 2766
55e303ae 2767 bctrl ; Call the op function
1c79356b 2768
55e303ae 2769 crmove cr1_eq,cr0_eq ; Save the return code
1c79356b 2770
55e303ae
A
2771 mr. r3,r3 ; Was there a previously valid PTE?
2772 beq-- hwpNxt64 ; Nope...
2773
2774 std r5,8(r3) ; Save bottom of PTE
2775 eieio ; Make sure we do not reorder
2776 std r4,0(r3) ; Revalidate the PTE
d7e50217 2777
55e303ae
A
2778 eieio ; Make sure all updates come first
2779 stw r6,0(r7) ; Unlock the PCA
2780
2781hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2782 ld r31,mpAlias(r31) ; Chain on to the next
55e303ae 2783 b hwpSrc64 ; Check it out...
1c79356b 2784
55e303ae
A
2785 .align 5
2786
2787hwpNone64: mtctr r28 ; Get the post routine address
2788
2789 lwz r30,ppLink+4(r29) ; Save the old RC
2790 mr r3,r29 ; Get the physent address
2791 bctrl ; Call post routine
2792
2793 bl mapPhysUnlock ; Unlock the physent
2794
2795 mtmsrd r26 ; Restore translation/mode/etc.
1c79356b 2796 isync
55e303ae
A
2797 b hwpReturn ; Go restore registers and return...
2798
2799 .align 5
1c79356b 2800
55e303ae
A
2801hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2802 mr r3,r29 ; Get the physent address
2803 bl mapPhysUnlock ; Unlock the physent
2804
2805 mtmsrd r26 ; Restore translation/mode/etc.
2806 isync
2807
91447636
A
2808hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2809 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2810 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2811 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
55e303ae 2812 mr r3,r30 ; Pass back the RC
91447636
A
2813 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2814 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
55e303ae 2815 mtlr r0 ; Restore the return
91447636
A
2816 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2817 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2818 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
55e303ae
A
2819 lwz r1,0(r1) ; Pop the stack
2820 blr ; Leave...
d7e50217 2821
d7e50217 2822
55e303ae
A
2823;
2824; The preop/op/postop function table.
2825 ; Each function slot is 128 bytes long (at most 32 instructions).
2826 ; If a routine grows past that, we must fix the address calculations
2827 ; at the start of hwpOpBase
2828;
2829; The routine must set CR0_EQ in order to continue scan.
2830; If CR0_EQ is not set, an early return from the function is made.
2831;
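;
; As a minimal C sketch (illustrative names, not the kernel's), the scan
; driven through this table is just a walk over the alias chain in which
; each op reports continue/stop; the assembly encodes "continue" as CR0_EQ:
;
;   #include <stdbool.h>
;   #include <stddef.h>
;
;   typedef bool (*op_fn)(void *mapping);           /* true = keep scanning */
;
;   static void *hw_walk(void *map, void *(*next)(void *), op_fn op) {
;       while (map != NULL) {                       /* like hwpQSrc64       */
;           if (!op(map))
;               return map;                         /* like hwpEarly64      */
;           map = next(map);                        /* ld r31,mpAlias(r31)  */
;       }
;       return NULL;                                /* like hwpNone64       */
;   }
;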
d7e50217 2832
55e303ae
A
2833 .align 7
2834
2835hwpOpBase:
2836
2837; Function 0 - No operation
2838
2839hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2840 blr ; Just return...
1c79356b
A
2841
2842 .align 5
1c79356b 2843
55e303ae 2844; This is the continuation of function 4 - Set attributes in mapping
1c79356b 2845
55e303ae
A
2846; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2847; NOTE: we also invalidate the i-cache below, in case the page holds code.
2848
91447636 2849hwpSAM: li r11,4096 ; Get page size
d7e50217 2850
55e303ae
A
2851hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2852 dcbf r11,r5 ; Flush the line in the data cache
2853 bgt++ hwpSAMinvd ; Go do the rest of it...
2854
2855 sync ; Make sure it is done
1c79356b 2856
91447636 2857 li r11,4096 ; Get page size
55e303ae
A
2858
2859hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2860 icbi r11,r5 ; Flush the line in the icache
2861 bgt++ hwpSAMinvi ; Go do the rest of it...
2862
2863 sync ; Make sure it is done
1c79356b 2864
55e303ae
A
2865 cmpw r0,r0 ; Make sure we return CR0_EQ
2866 blr ; Return...
1c79356b 2867
1c79356b 2868
91447636 2869; Function 1 - Set protection in physent (obsolete)
1c79356b 2870
55e303ae
A
2871 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2872
91447636 2873hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
55e303ae 2874 blr ; Return...
1c79356b 2875
1c79356b 2876
55e303ae 2877; Function 2 - Set protection in mapping
1c79356b 2878
5d5c5d0d
A
2879; NOTE: Changes to no-execute permission are ignored
2880
55e303ae 2881 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
1c79356b 2882
55e303ae
A
2883hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2884 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2885 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
5d5c5d0d 2886 li r0,lo16(mpPP) ; Get protection bits
55e303ae 2887 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
5d5c5d0d 2888 rlwinm r2,r25,0,mpPP ; Isolate new protection bits
55e303ae 2889 beqlr-- ; Leave if permanent mapping (before we trash R5)...
5d5c5d0d
A
2890 andc r5,r5,r0 ; Clear the old prot bits
2891 or r5,r5,r2 ; Move in the new prot bits
55e303ae
A
2892 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2893 cmpw r0,r0 ; Make sure we return CR0_EQ
2894 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2895 blr ; Leave...
2896
2897; Function 3 - Set attributes in physent
1c79356b 2898
55e303ae 2899 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
1c79356b 2900
91447636 2901hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
1c79356b 2902
55e303ae 2903hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
91447636 2904 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
55e303ae
A
2905 stwcx. r4,r5,r29 ; Try to stuff it
2906 bne-- hwpSAtrPhX ; Try again...
2907; Note: CR0_EQ is set because of stwcx.
2908 blr ; Return...
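;
; The lwarx/stwcx. pair above is a load-reserve/store-conditional update
; loop; it retries until the store completes without interference. A
; minimal C11 equivalent (illustrative names):
;
;   #include <stdatomic.h>
;   #include <stdint.h>
;
;   static void set_phys_attr(_Atomic uint32_t *flags, uint32_t mask, uint32_t newbits) {
;       uint32_t old = atomic_load(flags);
;       while (!atomic_compare_exchange_weak(flags, &old, (old & ~mask) | newbits))
;           ;                                       /* like bne-- hwpSAtrPhX */
;   }
;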
de355530 2909
55e303ae 2910; Function 4 - Set attributes in mapping
d7e50217 2911
55e303ae
A
2912 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2913
2914hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2915 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
91447636 2916 li r2,mpM ; Force on coherent
55e303ae
A
2917 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2918 li r0,lo16(mpWIMG) ; Get wimg mask
2919 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
91447636
A
2920 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2921 ; Copy in the cache inhibited bit
55e303ae
A
2922 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2923 andc r5,r5,r0 ; Clear the old wimg
91447636
A
2924 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2925 ; Copy in the guarded bit
55e303ae
A
2926 mfsprg r9,2 ; Feature flags
2927 or r5,r5,r2 ; Move in the new wimg
2928 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2929 lwz r2,mpPAddr(r31) ; Get the physical address
2930 li r0,0xFFF ; Start a mask
2931 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2932 rlwinm r5,r0,0,1,0 ; Copy to top half
2933 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2934 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2935 and r5,r5,r2 ; Clean stuff in top 32 bits
2936 andc r2,r2,r0 ; Clean bottom too
2937 rlwimi r5,r2,0,0,31 ; Insert low 32 bits to make full physical address
2938 b hwpSAM ; Join common
1c79356b 2939
55e303ae
A
2940; NOTE: we moved the remainder of the code out of here because it
2941; did not fit in the 128 bytes allotted. It got stuck into the free space
2942; at the end of the no-op function.
2943
2944
2945
de355530 2946
55e303ae 2947; Function 5 - Clear reference in physent
1c79356b 2948
55e303ae 2949 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
1c79356b 2950
55e303ae 2951hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2952
55e303ae 2953hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
91447636 2954 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
55e303ae
A
2955 stwcx. r4,r5,r29 ; Try to stuff it
2956 bne-- hwpCRefPhX ; Try again...
2957; Note: CR0_EQ is set because of stwcx.
2958 blr ; Return...
1c79356b
A
2959
2960
55e303ae 2961; Function 6 - Clear reference in mapping
1c79356b 2962
55e303ae 2963 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
1c79356b 2964
55e303ae
A
2965hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2966 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2967 andc r5,r5,r0 ; Clear in PTE copy
2968 andc r8,r8,r0 ; and in the mapping
2969 cmpw r0,r0 ; Make sure we return CR0_EQ
2970 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2971 blr ; Return...
1c79356b 2972
de355530 2973
55e303ae 2974; Function 7 - Clear change in physent
1c79356b 2975
55e303ae 2976 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
1c79356b 2977
55e303ae 2978hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2979
55e303ae
A
2980hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2981 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2982 stwcx. r4,r5,r29 ; Try to stuff it
2983 bne-- hwpCCngPhX ; Try again...
2984; Note: CR0_EQ is set because of stwcx.
2985 blr ; Return...
1c79356b 2986
de355530 2987
55e303ae 2988; Function 8 - Clear change in mapping
1c79356b 2989
55e303ae
A
2990 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2991
2992hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2993 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2994 andc r5,r5,r0 ; Clear in PTE copy
2995 andc r8,r8,r0 ; and in the mapping
2996 cmpw r0,r0 ; Make sure we return CR0_EQ
2997 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2998 blr ; Return...
d7e50217 2999
de355530 3000
55e303ae 3001; Function 9 - Set reference in physent
d7e50217 3002
55e303ae 3003 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
d7e50217 3004
55e303ae
A
3005hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3006
3007hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
3008 ori r4,r4,lo16(ppR) ; Set the reference
3009 stwcx. r4,r5,r29 ; Try to stuff it
3010 bne-- hwpSRefPhX ; Try again...
3011; Note: CR0_EQ is set because of stwcx.
3012 blr ; Return...
d7e50217 3013
1c79356b 3014
55e303ae 3015; Function 10 - Set reference in mapping
d7e50217 3016
55e303ae
A
3017 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3018
3019hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
55e303ae
A
3020 ori r8,r8,lo16(mpR) ; Set reference in mapping
3021 cmpw r0,r0 ; Make sure we return CR0_EQ
3022 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3023 blr ; Return...
3024
3025; Function 11 - Set change in physent
1c79356b 3026
55e303ae 3027 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
1c79356b 3028
55e303ae 3029hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 3030
55e303ae
A
3031hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3032 ori r4,r4,lo16(ppC) ; Set the change bit
3033 stwcx. r4,r5,r29 ; Try to stuff it
3034 bne-- hwpSCngPhX ; Try again...
3035; Note: CR0_EQ is set because of stwcx.
3036 blr ; Return...
de355530 3037
55e303ae 3038; Function 12 - Set change in mapping
1c79356b 3039
55e303ae 3040 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
1c79356b 3041
55e303ae 3042hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
55e303ae
A
3043 ori r8,r8,lo16(mpC) ; Set change in mapping
3044 cmpw r0,r0 ; Make sure we return CR0_EQ
3045 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3046 blr ; Return...
1c79356b 3047
55e303ae 3048; Function 13 - Test reference in physent
1c79356b 3049
55e303ae
A
3050 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3051
3052hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3053 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3054 blr ; Return (CR0_EQ set to continue if reference is off)...
1c79356b 3055
1c79356b 3056
55e303ae 3057; Function 14 - Test reference in mapping
1c79356b 3058
55e303ae 3059 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
de355530 3060
55e303ae
A
3061hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3062 blr ; Return (CR0_EQ set to continue if reference is off)...
3063
91447636 3064
55e303ae 3065; Function 15 - Test change in physent
1c79356b 3066
55e303ae 3067 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
1c79356b 3068
55e303ae
A
3069hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3070 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
91447636 3071 blr ; Return (CR0_EQ set to continue if change is off)...
55e303ae
A
3072
3073
3074; Function 16 - Test change in mapping
3075
3076 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
d7e50217 3077
55e303ae 3078hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
91447636
A
3079 blr ; Return (CR0_EQ set to continue if change is off)...
3080
3081
3082; Function 17 - Test reference and change in physent
55e303ae
A
3083
3084 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3085
91447636
A
3086hwpTRefCngPhy:
3087 lwz r0,ppLink+4(r29) ; Get the flags from physent
3088 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3089 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3090 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3091 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3092
3093
3094; Function 18 - Test reference and change in mapping
3095
3096 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3097hwpTRefCngMap:
3098 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3099 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3100 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3101 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3102
3103
3104; Function 19 - Clear reference and change in physent
3105
3106 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3107hwpCRefCngPhy:
3108 li r5,ppLink+4 ; Get offset for flag part of physent
3109
3110hwpCRefCngPhX:
3111 lwarx r4,r5,r29 ; Get the old flags
3112 andc r4,r4,r25 ; Clear R and C as specified by mask
3113 stwcx. r4,r5,r29 ; Try to stuff it
3114 bne-- hwpCRefCngPhX ; Try again...
3115; Note: CR0_EQ is set because of stwcx.
3116 blr ; Return...
3117
3118
3119; Function 20 - Clear reference and change in mapping
3120
3121 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3122hwpCRefCngMap:
3123 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3124 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3125 andc r5,r5,r0 ; Clear in PTE copy
3126 andc r8,r8,r0 ; and in the mapping
3127 cmpw r0,r0 ; Make sure we return CR0_EQ
3128 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3129 blr ; Return...
3130
d7e50217 3131
91447636 3132 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
d7e50217 3133
de355530 3134;
91447636 3135; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
55e303ae
A
3136;
3137; Returns:
3138; mapRtOK - if all is ok
3139; mapRtBadLk - if mapping lock fails
3140; mapRtPerm - if mapping is permanent
3141; mapRtNotFnd - if mapping is not found
3142; mapRtBlock - if mapping is a block
de355530 3143;
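;
; A sketch of how a caller consumes these return codes; the C-level types
; and declaration are assumptions here, not taken from this file:
;
;   addr64_t next;
;   unsigned int ret = hw_protect(pmap, va, prot, &next);
;   switch (ret) {
;       case mapRtOK:                   /* protection changed              */
;       case mapRtNotFnd:               /* nothing mapped at va            */
;           break;                      /* next holds the following vaddr  */
;       default:                        /* mapRtBadLk/mapRtPerm/mapRtBlock */
;           break;                      /* give up on this range           */
;   }
;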
55e303ae
A
3144 .align 5
3145 .globl EXT(hw_protect)
d7e50217 3146
55e303ae
A
3147LEXT(hw_protect)
3148 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3149 mflr r0 ; Save the link register
3150 stw r24,FM_ARG0+0x00(r1) ; Save a register
3151 stw r25,FM_ARG0+0x04(r1) ; Save a register
3152 mr r25,r7 ; Remember address of next va
3153 stw r26,FM_ARG0+0x08(r1) ; Save a register
3154 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3155 stw r28,FM_ARG0+0x10(r1) ; Save a register
3156 mr r24,r6 ; Save the new protection flags
3157 stw r29,FM_ARG0+0x14(r1) ; Save a register
3158 stw r30,FM_ARG0+0x18(r1) ; Save a register
3159 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3160 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1c79356b 3161
91447636
A
3162#if DEBUG
3163 lwz r11,pmapFlags(r3) ; Get pmaps flags
3164 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3165 bne hpPanic ; Call not valid for guest shadow assist pmap
3166#endif
3167
55e303ae
A
3168 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3169 lwz r7,pmapvr+4(r3) ; Get the second part
d7e50217 3170
d7e50217 3171
55e303ae 3172 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
9bccf70c 3173
55e303ae
A
3174 mr r27,r11 ; Remember the old MSR
3175 mr r26,r12 ; Remember the feature bits
9bccf70c 3176
55e303ae 3177 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 3178
55e303ae
A
3179 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3180
3181 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
9bccf70c 3182
55e303ae
A
3183hpSF1: mr r29,r4 ; Save top half of vaddr
3184 mr r30,r5 ; Save the bottom half
3185
3186 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3187 bl sxlkShared ; Go get a shared lock on the mapping lists
3188 mr. r3,r3 ; Did we get the lock?
3189 bne-- hpBadLock ; Nope...
d7e50217 3190
55e303ae
A
3191 mr r3,r28 ; get the pmap address
3192 mr r4,r29 ; Get bits 0:31 to look for
3193 mr r5,r30 ; Get bits 32:64
de355530 3194
55e303ae 3195 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
d7e50217 3196
91447636
A
3197 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3198 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3199 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3200 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
55e303ae 3201 mr. r31,r3 ; Save the mapping if we found it
55e303ae
A
3202 mr r29,r4 ; Save next va high half
3203 mr r30,r5 ; Save next va low half
d7e50217 3204
55e303ae 3205 beq-- hpNotFound ; Not found...
de355530 3206
91447636 3207 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
d7e50217 3208
55e303ae
A
3209 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3210
3211 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3212
91447636 3213 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
55e303ae
A
3214 mr. r3,r3 ; Was there a previously valid PTE?
3215
3216 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3217
3218 beq-- hpNoOld32 ; Nope...
1c79356b 3219
55e303ae
A
3220 stw r5,4(r3) ; Store second half of PTE
3221 eieio ; Make sure we do not reorder
3222 stw r4,0(r3) ; Revalidate the PTE
3223
3224 eieio ; Make sure all updates come first
3225 stw r6,0(r7) ; Unlock PCA
3226
3227hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3228 bl sxlkUnlock ; Unlock the search list
de355530 3229
55e303ae
A
3230 li r3,mapRtOK ; Set normal return
3231 b hpR32 ; Join common...
3232
3233 .align 5
1c79356b 3234
d7e50217 3235
55e303ae
A
3236hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3237
91447636 3238 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
55e303ae
A
3239 mr. r3,r3 ; Was there a previously valid PTE?
3240
3241 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3242
3243 beq-- hpNoOld64 ; Nope...
d7e50217 3244
55e303ae
A
3245 std r5,8(r3) ; Store second half of PTE
3246 eieio ; Make sure we do not reorder
3247 std r4,0(r3) ; Revalidate the PTE
de355530 3248
55e303ae
A
3249 eieio ; Make sure all updates come first
3250 stw r6,0(r7) ; Unlock PCA
de355530 3251
55e303ae
A
3252hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3253 bl sxlkUnlock ; Unlock the search list
de355530 3254
55e303ae
A
3255 li r3,mapRtOK ; Set normal return
3256 b hpR64 ; Join common...
de355530 3257
55e303ae
A
3258 .align 5
3259
3260hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3261
3262hpR32: mtmsr r27 ; Restore enables/translation/etc.
3263 isync
3264 b hpReturnC ; Join common...
3265
3266hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3267 isync
3268
3269hpReturnC: stw r29,0(r25) ; Save the top of the next va
3270 stw r30,4(r25) ; Save the bottom of the next va
3271 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
3272 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3273 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3274 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3275 mtlr r0 ; Restore the return
3276 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3277 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3278 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3279 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3280 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3281 lwz r1,0(r1) ; Pop the stack
3282 blr ; Leave...
3283
3284 .align 5
3285
3286hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3287 b hpReturn ; Leave....
d7e50217 3288
55e303ae
A
3289hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3290 bl sxlkUnlock ; Unlock the search list
d7e50217 3291
55e303ae
A
3292 li r3,mapRtNotFnd ; Set that we did not find the requested page
3293 b hpReturn ; Leave....
3294
3295hpNotAllowed:
3296 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3297 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3298 bne-- hpNotFound ; Yeah...
3299 bl sxlkUnlock ; Unlock the search list
3300
3301 li r3,mapRtBlock ; Assume it was a block
91447636
A
3302 rlwinm r0,r7,0,mpType ; Isolate mapping type
3303 cmplwi r0,mpBlock ; Is this a block mapping?
3304 beq++ hpReturn ; Yes, leave...
55e303ae
A
3305
3306 li r3,mapRtPerm ; Set that we hit a permanent page
3307 b hpReturn ; Leave....
9bccf70c 3308
91447636
A
3309hpPanic: lis r0,hi16(Choke) ; System abend
3310 ori r0,r0,lo16(Choke) ; System abend
3311 li r3,failMapping ; Show that we failed some kind of mapping thing
3312 sc
3313
9bccf70c 3314
55e303ae
A
3315;
3316; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3317;
3318; Returns following code ORed with RC from mapping
3319; mapRtOK - if all is ok
3320; mapRtBadLk - if mapping lock fails
3321; mapRtNotFnd - if mapping is not found
3322;
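;
; Sketch of decomposing the return value, which ORs one of the codes above
; with the mapping's R and C bits (the decomposition is illustrative; mpR
; and mpC are the mapping flag bits used throughout this file):
;
;   unsigned int ret = hw_test_rc(pmap, va, 1);     /* nonzero = reset RC   */
;   if ((ret & ~(mpR | mpC)) == mapRtOK) {
;       int referenced = (ret & mpR) != 0;
;       int changed    = (ret & mpC) != 0;
;   }
;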
3323 .align 5
3324 .globl EXT(hw_test_rc)
9bccf70c 3325
55e303ae
A
3326LEXT(hw_test_rc)
3327 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3328 mflr r0 ; Save the link register
3329 stw r24,FM_ARG0+0x00(r1) ; Save a register
3330 stw r25,FM_ARG0+0x04(r1) ; Save a register
3331 stw r26,FM_ARG0+0x08(r1) ; Save a register
3332 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3333 stw r28,FM_ARG0+0x10(r1) ; Save a register
3334 mr r24,r6 ; Save the reset request
3335 stw r29,FM_ARG0+0x14(r1) ; Save a register
3336 stw r30,FM_ARG0+0x18(r1) ; Save a register
3337 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3338 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
9bccf70c 3339
91447636
A
3340#if DEBUG
3341 lwz r11,pmapFlags(r3) ; Get pmaps flags
3342 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3343 bne htrPanic ; Call not valid for guest shadow assist pmap
3344#endif
3345
55e303ae
A
3346 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3347 lwz r7,pmapvr+4(r3) ; Get the second part
0b4e3aa0 3348
9bccf70c 3349
55e303ae 3350 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
9bccf70c 3351
55e303ae
A
3352 mr r27,r11 ; Remember the old MSR
3353 mr r26,r12 ; Remember the feature bits
9bccf70c 3354
55e303ae 3355 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 3356
55e303ae 3357 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
1c79356b 3358
55e303ae 3359 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
1c79356b 3360
55e303ae
A
3361htrSF1: mr r29,r4 ; Save top half of vaddr
3362 mr r30,r5 ; Save the bottom half
1c79356b 3363
55e303ae
A
3364 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3365 bl sxlkShared ; Go get a shared lock on the mapping lists
3366 mr. r3,r3 ; Did we get the lock?
3367 li r25,0 ; Clear RC
3368 bne-- htrBadLock ; Nope...
3369
3370 mr r3,r28 ; get the pmap address
3371 mr r4,r29 ; Get bits 0:31 to look for
3372 mr r5,r30 ; Get bits 32:64
d7e50217 3373
55e303ae 3374 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
9bccf70c 3375
91447636
A
3376 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3377 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3378 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3379 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
55e303ae 3380 mr. r31,r3 ; Save the mapping if we found it
91447636 3381 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
d7e50217 3382
91447636 3383 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
1c79356b 3384
55e303ae
A
3385 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3386
3387 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3388
3389 cmplwi cr1,r24,0 ; Do we want to clear RC?
3390 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3391 mr. r3,r3 ; Was there a previously valid PTE?
3392 li r0,lo16(mpR|mpC) ; Get bits to clear
9bccf70c 3393
55e303ae
A
3394 and r25,r5,r0 ; Save the RC bits
3395 beq++ cr1,htrNoClr32 ; Nope...
3396
3397 andc r12,r12,r0 ; Clear mapping copy of RC
3398 andc r5,r5,r0 ; Clear PTE copy of RC
3399 sth r12,mpVAddr+6(r31) ; Set the new RC
9bccf70c 3400
55e303ae 3401htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
d7e50217 3402
55e303ae
A
3403 sth r5,6(r3) ; Store updated RC
3404 eieio ; Make sure we do not reorder
3405 stw r4,0(r3) ; Revalidate the PTE
9bccf70c 3406
55e303ae
A
3407 eieio ; Make sure all updates come first
3408 stw r6,0(r7) ; Unlock PCA
1c79356b 3409
55e303ae
A
3410htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3411 bl sxlkUnlock ; Unlock the search list
3412 li r3,mapRtOK ; Set normal return
3413 b htrR32 ; Join common...
1c79356b 3414
55e303ae
A
3415 .align 5
3416
3417
3418htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3419
3420 cmplwi cr1,r24,0 ; Do we want to clear RC?
3421 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3422 mr. r3,r3 ; Was there a previously valid PTE?
3423 li r0,lo16(mpR|mpC) ; Get bits to clear
1c79356b 3424
55e303ae
A
3425 and r25,r5,r0 ; Save the RC bits
3426 beq++ cr1,htrNoClr64 ; Nope...
3427
3428 andc r12,r12,r0 ; Clear mapping copy of RC
3429 andc r5,r5,r0 ; Clear PTE copy of RC
3430 sth r12,mpVAddr+6(r31) ; Set the new RC
1c79356b 3431
55e303ae
A
3432htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3433
3434 sth r5,14(r3) ; Store updated RC
3435 eieio ; Make sure we do not reorder
3436 std r4,0(r3) ; Revalidate the PTE
1c79356b 3437
55e303ae
A
3438 eieio ; Make sure all updates come first
3439 stw r6,0(r7) ; Unlock PCA
1c79356b 3440
55e303ae
A
3441htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3442 bl sxlkUnlock ; Unlock the search list
3443 li r3,mapRtOK ; Set normal return
3444 b htrR64 ; Join common...
de355530 3445
55e303ae
A
3446 .align 5
3447
3448htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
de355530 3449
55e303ae
A
3450htrR32: mtmsr r27 ; Restore enables/translation/etc.
3451 isync
3452 b htrReturnC ; Join common...
de355530 3453
55e303ae
A
3454htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3455 isync
1c79356b 3456
55e303ae
A
3457htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
3458 or r3,r3,r25 ; Send the RC bits back
3459 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3460 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3461 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3462 mtlr r0 ; Restore the return
3463 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3464 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3465 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3466 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3467 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3468 lwz r1,0(r1) ; Pop the stack
1c79356b
A
3469 blr ; Leave...
3470
3471 .align 5
3472
55e303ae
A
3473htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3474 b htrReturn ; Leave....
1c79356b 3475
55e303ae
A
3476htrNotFound:
3477 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3478 bl sxlkUnlock ; Unlock the search list
1c79356b 3479
55e303ae
A
3480 li r3,mapRtNotFnd ; Set that we did not find the requested page
3481 b htrReturn ; Leave....
3482
91447636
A
3483htrPanic: lis r0,hi16(Choke) ; System abend
3484 ori r0,r0,lo16(Choke) ; System abend
3485 li r3,failMapping ; Show that we failed some kind of mapping thing
3486 sc
3487
3488
3489;
3490;
3491; mapFindLockPN - find and lock physent for a given page number
3492;
3493;
3494 .align 5
3495mapFindLockPN:
3496 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3497 mr r2,r3 ; Save our target
3498 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3499
3500mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3501 lwz r5,mrStart(r9) ; Get start of table entry
3502 lwz r0,mrEnd(r9) ; Get end of table entry
3503 addi r9,r9,mrSize ; Point to the next slot
3a60a9f5 3504 cmplwi cr7,r3,0 ; Are we at the end of the table?
91447636
A
3505 cmplw r2,r5 ; See if we are in this table
3506 cmplw cr1,r2,r0 ; Check end also
3507 sub r4,r2,r5 ; Calculate index to physical entry
3a60a9f5 3508 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
91447636
A
3509 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3510 slwi r4,r4,3 ; Get offset to physical entry
3511
3512 blt-- mapFLPNitr ; Did not find it...
3513
3514 add r3,r3,r4 ; Point right to the slot
3515 b mapPhysLock ; Join common lock code
3516
3517mapFLPNmiss:
3518 li r3,0 ; Show that we did not find it
3519 blr ; Leave...
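;
; A minimal C sketch of this bank search, assuming a table of entries
; holding {physent table pointer, start page, end page} where a zero table
; pointer ends the scan (that is what the cmplwi cr7,r3,0 checks). Types
; and names are illustrative:
;
;   #include <stddef.h>
;   #include <stdint.h>
;
;   typedef struct { void *tab; uint32_t start, end; } bank_t;
;
;   static void *find_physent(const bank_t *b, uint32_t pagenum) {
;       for (; b->tab != NULL; b++) {               /* mapFLPNitr           */
;           if (pagenum >= b->start && pagenum <= b->end)
;               return (char *)b->tab + ((size_t)(pagenum - b->start) << 3);
;       }                                           /* 8-byte physents      */
;       return NULL;                                /* mapFLPNmiss          */
;   }
;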
3520
3521
3522;
55e303ae
A
3523; mapPhysFindLock - find physent list and lock it
3524; R31 points to mapping
3525;
3526 .align 5
3527
3528mapPhysFindLock:
3529 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3530 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
91447636 3531 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
55e303ae
A
3532 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3533 add r3,r3,r4 ; Point to table entry
3534 lwz r5,mpPAddr(r31) ; Get physical page number
3535 lwz r7,mrStart(r3) ; Get the start of range
3536 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3537 sub r6,r5,r7 ; Get index to physent
3538 rlwinm r6,r6,3,0,28 ; Get offset to physent
3539 add r3,r3,r6 ; Point right to the physent
3540 b mapPhysLock ; Join in the lock...
3541
3542;
3543; mapPhysLock - lock a physent list
3544; R3 contains list header
3545;
3546 .align 5
3547
3548mapPhysLockS:
3549 li r2,lgKillResv ; Get a spot to kill reservation
3550 stwcx. r2,0,r2 ; Kill it...
3551
3552mapPhysLockT:
3553 lwz r2,ppLink(r3) ; Get physent chain header
3554 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3555 bne-- mapPhysLockT ; Nope, still locked...
3556
3557mapPhysLock:
3558 lwarx r2,0,r3 ; Get the lock
3559 rlwinm. r0,r2,0,0,0 ; Is it locked?
3560 oris r0,r2,0x8000 ; Set the lock bit
3561 bne-- mapPhysLockS ; It is locked, spin on it...
3562 stwcx. r0,0,r3 ; Try to stuff it back...
3563 bne-- mapPhysLock ; Collision, try again...
3564 isync ; Clear any speculations
3565 blr ; Leave...
3566
3567
3568;
3569; mapPhysUnlock - unlock a physent list
3570; R3 contains list header
3571;
3572 .align 5
3573
3574mapPhysUnlock:
3575 lwz r0,ppLink(r3) ; Get physent chain header
3576 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3577 eieio ; Make sure unlock comes last
3578 stw r0,ppLink(r3) ; Unlock the list
3579 blr
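;
; The physent lock above lives in bit 0 of ppLink and is taken with a
; test-and-test-and-set: spin on plain loads (mapPhysLockT) and retry the
; reserved store only when the bit looks clear. A C11 sketch with
; illustrative names:
;
;   #include <stdatomic.h>
;   #include <stdint.h>
;
;   #define PP_LOCK 0x80000000u                     /* bit 0 of ppLink      */
;
;   static void phys_lock(_Atomic uint32_t *link) {
;       for (;;) {
;           uint32_t old = atomic_load(link);       /* spin without stores  */
;           if (old & PP_LOCK) continue;            /* like mapPhysLockT    */
;           if (atomic_compare_exchange_weak(link, &old, old | PP_LOCK))
;               break;                              /* stwcx. succeeded     */
;       }
;       atomic_thread_fence(memory_order_acquire);  /* isync                */
;   }
;
;   static void phys_unlock(_Atomic uint32_t *link) {
;       atomic_thread_fence(memory_order_release);  /* eieio                */
;       atomic_fetch_and(link, ~PP_LOCK);           /* clear only the lock  */
;   }
;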
3580
3581;
3582; mapPhysMerge - merge the RC bits into the master copy
3583; R3 points to the physent
3584; R4 contains the RC bits
3585;
3586; Note: we just return if RC is 0
3587;
3588 .align 5
3589
3590mapPhysMerge:
3591 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3592 la r5,ppLink+4(r3) ; Point to the RC field
3593 beqlr-- ; Leave if RC is 0...
3594
3595mapPhysMergeT:
3596 lwarx r6,0,r5 ; Get the RC part
3597 or r6,r6,r4 ; Merge in the RC
3598 stwcx. r6,0,r5 ; Try to stuff it back...
3599 bne-- mapPhysMergeT ; Collision, try again...
3600 blr ; Leave...
3601
3602;
3603; Sets the physent link pointer and preserves all flags
3604; The list is locked
3605; R3 points to physent
3606; R4 has link to set
3607;
3608
3609 .align 5
3610
3611mapPhyCSet32:
3612 la r5,ppLink+4(r3) ; Point to the link word
3613
3614mapPhyCSetR:
3615 lwarx r2,0,r5 ; Get the link and flags
91447636 3616 rlwimi r4,r2,0,ppFlags ; Insert the flags
55e303ae
A
3617 stwcx. r4,0,r5 ; Stick them back
3618 bne-- mapPhyCSetR ; Someone else did something, try again...
3619 blr ; Return...
3620
3621 .align 5
3622
3623mapPhyCSet64:
91447636
A
3624 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3625 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
55e303ae
A
3626
3627mapPhyCSet64x:
3628 ldarx r2,0,r3 ; Get the link and flags
3629 and r5,r2,r0 ; Isolate the flags
3630 or r6,r4,r5 ; Add them to the link
3631 stdcx. r6,0,r3 ; Stick them back
3632 bne-- mapPhyCSet64x ; Someone else did something, try again...
3633 blr ; Return...
3634
3635;
3636; mapBumpBusy - increment the busy count on a mapping
3637; R3 points to mapping
3638;
3639
3640 .align 5
3641
3642mapBumpBusy:
3643 lwarx r4,0,r3 ; Get mpBusy
3644 addis r4,r4,0x0100 ; Bump the busy count
3645 stwcx. r4,0,r3 ; Save it back
3646 bne-- mapBumpBusy ; This did not work, try again...
3647 blr ; Leave...
3648
3649;
3650; mapDropBusy - decrement the busy count on a mapping
3651; R3 points to mapping
3652;
3653
3654 .globl EXT(mapping_drop_busy)
3655 .align 5
3656
3657LEXT(mapping_drop_busy)
3658mapDropBusy:
3659 lwarx r4,0,r3 ; Get mpBusy
3660 addis r4,r4,0xFF00 ; Drop the busy count
3661 stwcx. r4,0,r3 ; Save it back
3662 bne-- mapDropBusy ; This did not work, try again...
3663 blr ; Leave...
3664
3665;
3666; mapDrainBusy - drain the busy count on a mapping
3667; R3 points to mapping
3668; Note: we already have a busy for ourselves. Only one
3669; busy per processor is allowed, so we just spin here
3670; waiting for the count to drop to 1.
3671; Also, the mapping can not be on any lists when we do this
3672; so all we are doing is waiting until it can be released.
3673;
3674
3675 .align 5
3676
3677mapDrainBusy:
3678 lwz r4,mpFlags(r3) ; Get mpBusy
3679 rlwinm r4,r4,8,24,31 ; Clean it up
3680 cmplwi r4,1 ; Is it just our busy?
3681 beqlr++ ; Yeah, it is clear...
3682 b mapDrainBusy ; Try again...
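;
; The busy count handled above sits in the top byte of mpFlags, so a bump
; is addis of 0x0100 (+0x01000000) and a drop is addis of 0xFF00
; (-0x01000000 mod 2^32). A C11 sketch with illustrative names:
;
;   #include <stdatomic.h>
;   #include <stdint.h>
;
;   static void map_bump_busy(_Atomic uint32_t *mpflags) {
;       atomic_fetch_add(mpflags, 0x01000000u);     /* ++ busy (top byte)   */
;   }
;   static void map_drop_busy(_Atomic uint32_t *mpflags) {
;       atomic_fetch_sub(mpflags, 0x01000000u);     /* -- busy (top byte)   */
;   }
;   static void map_drain_busy(_Atomic uint32_t *mpflags) {
;       while ((atomic_load(mpflags) >> 24) != 1)   /* wait until our busy  */
;           ;                                       /* is the only one left */
;   }
;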
3683
3684
3685
3686;
3687; handleDSeg - handle a data segment fault
3688; handleISeg - handle an instruction segment fault
3689;
3690; All that we do here is to map these to DSI or ISI and ensure
3691; that the hash bit is not set. This forces the fault code
3692; to also handle the missing segment.
3693;
3694; At entry R2 contains per_proc, R13 contains savarea pointer,
3695; and R11 is the exception code.
3696;
3697
3698 .align 5
3699 .globl EXT(handleDSeg)
3700
3701LEXT(handleDSeg)
3702
3703 li r11,T_DATA_ACCESS ; Change fault to DSI
3704 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3705 b EXT(handlePF) ; Join common...
3706
3707 .align 5
3708 .globl EXT(handleISeg)
3709
3710LEXT(handleISeg)
3711
3712 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3713 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3714 b EXT(handlePF) ; Join common...
3715
3716
3717/*
3718 * handlePF - handle a page fault interruption
3719 *
3720 * At entry R2 contains per_proc, R13 contains savarea pointer,
3721 * and R11 is the exception code.
3722 *
3723 * This first part does a quick check to see if we can handle the fault.
3724 * We cannot handle any kind of protection exceptions here, so we pass
3725 * them up to the next level.
3726 *
3727 * NOTE: In order for a page-fault redrive to work, the translation miss
3728 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3729 * before we come here.
3730 */
3731
3732 .align 5
3733 .globl EXT(handlePF)
3734
3735LEXT(handlePF)
3736
3737 mfsprg r12,2 ; Get feature flags
3738 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3739 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3740 mtcrf 0x02,r12 ; move pf64Bit to cr6
3741 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3742 lwz r18,SAVflags(r13) ; Get the flags
3743
3744 beq-- gotIfetch ; We have an IFETCH here...
3745
3746 lwz r27,savedsisr(r13) ; Get the DSISR
3747 lwz r29,savedar(r13) ; Get the first half of the DAR
3748 lwz r30,savedar+4(r13) ; And second half
3749
3750 b ckIfProt ; Go check if this is a protection fault...
3751
3752gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3753 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3754 lwz r30,savesrr0+4(r13) ; And second half
3755 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3756
3757ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3758 li r20,64 ; Set a limit of 64 nests for sanity check
3759 bne-- hpfExit ; Yes... (probably not though)
91447636 3760
55e303ae
A
3761;
3762; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3763; should be loading the user pmap here.
3764;
3765
3766 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3767 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3768 mr r19,r2 ; Remember the per_proc
3769 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3770 mr r23,r30 ; Save the low part of faulting address
3771 beq-- hpfInKern ; Skip if we are in the kernel
3772 la r8,ppUserPmap(r19) ; Point to the current user pmap
3773
3774hpfInKern: mr r22,r29 ; Save the high part of faulting address
3775
3776 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3777
3778;
3779; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3780; predefined value that corresponds to no address space. When we see that value
3781; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3782; cause the proper SR to be loaded.
3783;
3784
3785 lwz r28,4(r8) ; Pick up the pmap
3786 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3787 mr r25,r28 ; Save the original pmap (in case we nest)
91447636
A
3788 lwz r0,pmapFlags(r28) ; Get pmap's flags
3789 bne hpfGVtest ; Segs are not ours if so...
55e303ae
A
3790 mfsrin r4,r30 ; Get the SR that was used for translation
3791 cmplwi r4,invalSpace ; Is this a simulated segment fault?
91447636 3792 bne++ hpfGVtest ; No...
55e303ae
A
3793
3794 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
91447636 3795 b hpfGVtest ; Join on up...
55e303ae
A
3796
3797 .align 5
3798
3799 nop ; Push hpfNest to a 32-byte boundary
3800 nop ; Push hpfNest to a 32-byte boundary
3801 nop ; Push hpfNest to a 32-byte boundary
55e303ae
A
3802
3803hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3804 mr r25,r28 ; Save the original pmap (in case we nest)
91447636
A
3805 lwz r0,pmapFlags(r28) ; Get pmap's flags
3806
3807hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3808 bne hpfGVxlate ; Yup, do accelerated shadow stuff
55e303ae
A
3809
3810;
3811; This is where we loop descending nested pmaps
3812;
3813
3814hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3815 addi r20,r20,-1 ; Count nest try
3816 bl sxlkShared ; Go get a shared lock on the mapping lists
3817 mr. r3,r3 ; Did we get the lock?
3818 bne-- hpfBadLock ; Nope...
3819
3820 mr r3,r28 ; Get the pmap pointer
3821 mr r4,r22 ; Get top of faulting vaddr
3822 mr r5,r23 ; Get bottom of faulting vaddr
3823 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3824
3825 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3826 mr. r31,r3 ; Save the mapping if we found it
3827 cmplwi cr1,r0,0 ; Check for removal
3828 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3829
3830 bt-- cr0_eq,hpfNotFound ; Not found or removing...
91447636
A
3831
3832 rlwinm r0,r7,0,mpType ; Isolate mapping type
3833 cmplwi r0,mpNest ; Are we again nested?
3834 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3835 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
55e303ae
A
3836 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3837
3838 lhz r21,mpSpace(r31) ; Get the space
3839
91447636 3840 bne++ hpfFoundIt ; No, we found our guy...
55e303ae
A
3841
3842
3843#if pmapTransSize != 12
3844#error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3845#endif
91447636 3846 cmplwi r0,mpLinkage ; Linkage mapping?
55e303ae 3847 cmplwi cr1,r20,0 ; Too many nestings?
91447636 3848 beq-- hpfSpclNest ; Do we need to do special handling?
55e303ae
A
3849
3850hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3851 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3852 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3853 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3854 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3855 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3856 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3857 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3858 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3859 slwi r11,r21,3 ; Multiply space by 8
3860 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3861 addc r23,r23,r9 ; Relocate bottom half of vaddr
3862 lwz r10,0(r10) ; Get the actual translation map
3863 slwi r12,r21,2 ; Multiply space by 4
3864 add r10,r10,r11 ; Add in the higher part of the index
3865 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3866 adde r22,r22,r8 ; Relocate the top half of the vaddr
3867 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3868 bl sxlkUnlock ; Unlock the search list
3869
91447636 3870 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
55e303ae 3871 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
91447636
A
3872 cmplwi r28,0 ; Is the pmap paddr valid?
3873 bne+ hpfNest ; Nest into new pmap...
3874 b hpfBadPmap ; Handle bad pmap
55e303ae 3875
91447636 3876hpfGetPmap64:
55e303ae 3877 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
91447636
A
3878 cmpldi r28,0 ; Is the pmap paddr valid?
3879 bne++ hpfNest ; Nest into new pmap...
3880 b hpfBadPmap ; Handle bad pmap
3881
55e303ae
A
3882
3883;
3884; Error condition. We only allow 64 nestings. This keeps us from having to
3885; check for recursive nests when we install them.
3886;
3887
3888 .align 5
3889
3890hpfNestTooMuch:
3891 lwz r20,savedsisr(r13) ; Get the DSISR
3892 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3893 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3894 ori r20,r20,1 ; Indicate that there was a nesting problem
3895 stw r20,savedsisr(r13) ; Stash it
3896 lwz r11,saveexception(r13) ; Restore the exception code
3897 b EXT(PFSExit) ; Go exit through the page fault exit code...
3898
3899;
3900; Error condition - lock failed - this is fatal
3901;
3902
3903 .align 5
3904
3905hpfBadLock:
3906 lis r0,hi16(Choke) ; System abend
3907 ori r0,r0,lo16(Choke) ; System abend
3908 li r3,failMapping ; Show mapping failure
3909 sc
91447636
A
3910
3911;
3912; Error condition - space id selected an invalid pmap - fatal
3913;
3914
3915 .align 5
3916
3917hpfBadPmap:
3918 lis r0,hi16(Choke) ; System abend
3919 ori r0,r0,lo16(Choke) ; System abend
3920 li r3,failPmap ; Show invalid pmap
3921 sc
3922
55e303ae
A
3923;
3924; Did not find any kind of mapping
3925;
3926
3927 .align 5
3928
3929hpfNotFound:
3930 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3931 bl sxlkUnlock ; Unlock it
3932 lwz r11,saveexception(r13) ; Restore the exception code
3933
3934hpfExit: ; We need this because we cannot do a relative branch
3935 b EXT(PFSExit) ; Go exit through the page fault exit code...
3936
3937
3938;
3939; Here is where we handle special mappings. So far, the only use is to load a
3940; processor specific segment register for copy in/out handling.
3941;
3942; The only (so far implemented) special map is used for copyin/copyout.
3943; We keep a mapping of a "linkage" mapping in the per_proc.
3944; The linkage mapping is basically a nested pmap that is switched in
3945; as part of context switch. It relocates the appropriate user address
3946; space slice into the right place in the kernel.
3947;
3948
3949 .align 5
3950
3951hpfSpclNest:
91447636
A
3952 la r31,ppUMWmp(r19) ; Just point to the mapping
3953 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
55e303ae
A
3954 b hpfCSrch ; Go continue search...
3955
3956
3957;
3958; We have now found a mapping for the address we faulted on.
3959;
3960
3961;
3962; Here we go about calculating what the VSID should be. We concatenate
3963; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3964; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3965; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3966; the VSID.
3967;
3968; This is used both for segment handling and PTE handling
3969;
3970
3971
3972#if maxAdrSpb != 14
3973#error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3974#endif
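;
; A schematic C rendering of the hash just described; the bit placement is
; simplified and the assembly additionally packs a protection key into the
; "cache form" of the result, so treat this purely as the shape of the
; computation (names are illustrative):
;
;   #include <stdint.h>
;
;   static uint64_t make_vsid(uint64_t space14, uint64_t va) {
;       uint64_t sid3 = (space14 << 28) | (space14 << 14) | space14;
;       uint64_t seg  = va >> 28;           /* segment number (vaddr 0:35)   */
;       return (seg << 14) ^ sid3;          /* shifted ESID xor space copies */
;   }
;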
3975
91447636
A
3976; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3977; when a multi-level mapping has been successfully searched):
3978; r21: home space id number
3979; r22: relocated high-order 32 bits of vaddr
3980; r23: relocated low-order 32 bits of vaddr
3981; r25: pmap physical address
3982; r27: dsisr
3983; r28: home pmap physical address
3984; r29: high-order 32 bits of faulting vaddr
3985; r30: low-order 32 bits of faulting vaddr
3986; r31: mapping's physical address
3987
55e303ae
A
3988 .align 5
3989
3990hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
91447636 3991hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
55e303ae
A
3992 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3993 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3994 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
91447636 3995 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
55e303ae
A
3996 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3997 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3998 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3999 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
4000 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
4001 xor r14,r14,r20 ; Calculate the top half of VSID
4002 xor r15,r15,r21 ; Calculate the bottom half of the VSID
4003 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
4004 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
4005 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
4006 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
4007 or r12,r12,r15 ; Add key into the bottom of VSID
4008;
4009; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4010
4011 bne++ hpfPteMiss ; Nope, normal PTE miss...
4012
4013;
4014; Here is the only place that we make an entry in the pmap segment cache.
4015;
4016; Note that we do not make an entry in the segment cache for special
4017; nested mappings. This makes the copy in/out segment get refreshed
4018; when switching threads.
4019;
4020; The first thing that we do is to look up the ESID we are going to load
4021; into a segment in the pmap cache. If it is already there, this is
4022; a segment that appeared since the last time we switched address spaces.
4023; If all is correct, then it was another processor that made the cache
4024; entry. If not, well, it is an error that we should die on, but I have
4025; not figured out a good way to trap it yet.
4026;
4027; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4028; an entry based on the generation number, update the cache entry, and
4029; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4030; entries that correspond to the last 4 bits (32:35 for 64-bit and
4031; 0:3 for 32-bit) of the ESID.
4032;
4033; Then we unlock and bail.
4034;
4035; First lock it. Then select a free slot or steal one based on the generation
4036; number. Then store it, update the allocation flags, and unlock.
4037;
4038; The cache entry contains an image of the ESID/VSID pair we would load for
4039; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4040;
4041; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4042; the current one, which may have changed because we nested.
4043;
4044; Also remember that we do not store the valid bit in the ESID. If we
4045; do, this will break some other stuff.
4046;
4047
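;
; A simplified C sketch of the slot choice made below: free slots are
; marked by bits in the control word, count-leading-zeros finds the first
; one, and when none is free a victim is derived from the generation
; number (reduced here to a simple modulus; names are illustrative):
;
;   #include <stdint.h>
;
;   static int pick_slot(uint32_t free_bits, uint32_t generation, int nslots) {
;       for (int i = 0; i < nslots; i++)    /* like cntlzw r7,r4            */
;           if (free_bits & (0x80000000u >> i))
;               return i;                   /* first free slot              */
;       return (int)(generation % (uint32_t)nslots);  /* steal one          */
;   }
;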
4048 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4049
4050 mr r3,r25 ; Point to the pmap
37839358
A
4051 mr r4,r29 ; ESID high half
4052 mr r5,r30 ; ESID low half
55e303ae
A
4053 bl pmapCacheLookup ; Go see if this is in the cache already
4054
4055 mr. r3,r3 ; Did we find it?
4056 mr r4,r11 ; Copy this to a different register
4057
4058 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4059
4060 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4061 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4062
4063 cntlzw r7,r4 ; Find a free slot
4064
4065 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4066 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4067 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4068 addi r5,r4,1 ; Bump the generation number
4069 and r7,r7,r6 ; Clear bit number if none empty
4070 andc r8,r4,r6 ; Clear generation count if we found an empty
4071 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4072 or r7,r7,r8 ; Select a slot number
4073 li r8,0 ; Clear
4074 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4075 oris r8,r8,0x8000 ; Get the high bit on
4076 la r9,pmapSegCache(r25) ; Point to the segment cache
4077 slwi r6,r7,4 ; Get index into the segment cache
4078 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4079 srw r8,r8,r7 ; Get the mask
4080 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4081 li r0,0 ; Clear
4082 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4083 oris r0,r0,0xF000 ; Get the sub-tag mask
4084 add r9,r9,r6 ; Point to the cache slot
4085 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4086 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4087
4088 stw r29,sgcESID(r9) ; Save the top of the ESID
4089 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4090 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4091 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4092 or r10,r10,r5 ; Stick in subtag in case top half
4093 or r11,r11,r5 ; Stick in subtag in case bottom half
4094 stw r14,sgcVSID(r9) ; Save the top of the VSID
4095 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4096 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4097 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4098
4099 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4100 b hpfNoCacheEnt ; Go finish up...
4101
4102hpfSCSTbottom:
4103 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4104
4105
4106hpfNoCacheEnt:
4107 eieio ; Make sure cache is updated before lock
4108 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4109
4110
4111hpfNoCacheEnt2:
4112 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4113 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4114
4115;
4116; Make and enter 32-bit segment register
4117;
4118
4119 lwz r16,validSegs(r19) ; Get the valid SR flags
4120 xor r12,r12,r4 ; Alter the storage key before loading segment register
4121 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4122 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4123 lis r0,0x8000 ; Set bit 0
4124 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4125 srw r0,r0,r2 ; Get bit corresponding to SR
4126 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4127 or r16,r16,r0 ; Show that SR is valid
4128
4129 mtsrin r6,r30 ; Set the actual SR
4130
4131 stw r16,validSegs(r19) ; Set the valid SR flags
4132
4133 b hpfPteMiss ; SR loaded, go do a PTE...
4134
4135;
4136; Make and enter 64-bit segment look-aside buffer entry.
4137; Note that the cache entry is the right format except for valid bit.
4138; We also need to convert from long long to 64-bit register values.
4139;
4140
4141
4142 .align 5
4143
4144hpfLoadSeg64:
4145 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4146 sldi r8,r29,32 ; Move high order address over
4147 sldi r10,r14,32 ; Move high part of VSID over
4148
4149 not r3,r16 ; Make valids be 0s
4150 li r0,1 ; Prepare to set bit 0
4151
4152 cntlzd r17,r3 ; Find a free SLB
4153 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4154 or r9,r8,r30 ; Form full 64-bit address
4155 cmplwi r17,63 ; Did we find a free SLB entry?
4156 sldi r0,r0,63 ; Get bit 0 set
4157 or r10,r10,r12 ; Move in low part and keys
4158 addi r17,r17,1 ; Skip SLB 0 always
4159 blt++ hpfFreeSeg ; Yes, go load it...
4160
4161;
4162; No free SLB entries, select one that is in use and invalidate it
4163;
4164 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4165 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4166 addi r4,r4,1 ; Set next slot to steal
4167 slbmfee r7,r17 ; Get the entry that is in the selected spot
4168 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4169 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4170 srawi r2,r2,31 ; Get -1 if steal index still in range
4171 slbie r7 ; Invalidate the in-use SLB entry
4172 and r4,r4,r2 ; Reset steal index when it should wrap
4173 isync ;
4174
4175 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4176;
4177; We are now ready to stick the SLB entry in the SLB and mark it in use
4178;
4179
4180hpfFreeSeg:
4181 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4182 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4183 srd r0,r0,r4 ; Set bit mask for allocation
4184 oris r9,r9,0x0800 ; Turn on the valid bit
4185 or r16,r16,r0 ; Turn on the allocation flag
4186 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4187
4188 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4189 slbie r7 ; Blow away a potential duplicate
4190
4191hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4192
4193 std r16,validSegs(r19) ; Mark as valid
4194 b hpfPteMiss ; STE loaded, go do a PTE...
4195
4196;
4197; The segment has been set up and loaded if need be. Now we are ready to build the
4198; PTE and get it into the hash table.
4199;
4200; Note that there is actually a race here. If we start fault processing on
4201; a different pmap, i.e., we have descended into a nested pmap, it is possible
4202; that the nest could have been removed from the original pmap. We would
4203; succeed with this translation anyway. I do not think we need to worry
4204; about this (famous last words) because nobody should be unnesting anything
4205; if there are still people actively using them. It should be up to the
4206; higher level VM system to put the kibosh on this.
4207;
4208; There is also another race here: if we fault on the same mapping on more than
4209; one processor at the same time, we could end up with multiple PTEs for the same
4210; mapping. This is not a good thing.... We really only need one of the
4211; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4212; the mapping. If we see that set, we just abandon the handler and hope that by
4213; the time we restore context and restart the interrupted code, the fault has
4214; been resolved by the other guy. If not, we will take another fault.
4215;
4216
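;
; A C11 sketch of the claim protocol just described: atomically set the
; fault-in-progress flag, but abandon if it was already set or if a valid
; PTE pointer already exists (mpFIP and mpHValid are the bits the assembly
; tests; everything else here is illustrative):
;
;   #include <stdatomic.h>
;   #include <stdint.h>
;
;   static int claim_fault(_Atomic uint32_t *mpflags, uint32_t pte_ptr) {
;       uint32_t old = atomic_load(mpflags);
;       do {
;           if ((old & mpFIP) || (pte_ptr & mpHValid))
;               return 0;                   /* like hpfAbandon              */
;       } while (!atomic_compare_exchange_weak(mpflags, &old, old | mpFIP));
;       return 1;                           /* we own this fault            */
;   }
;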
4217;
4218; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4219; It is required to stay there until after we call mapSelSlot!!!!
4220;
4221
4222 .align 5
4223
4224hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4225 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4226 li r3,mpHValid ; Get the PTE valid bit
4227 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4228 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4229 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4230 and. r12,r12,r3 ; Isolate the valid bit
4231 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4232 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
91447636
A
4233 rlwinm r0,r2,0,mpType ; Isolate mapping type
4234 cmplwi r0,mpBlock ; Is this a block mapping?
4235 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
55e303ae
A
4236 stwcx. r2,0,r31 ; Store the flags
4237 bne-- hpfPteMiss ; Collision, try again...
4238
4239 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4240
4241;
4242; At this point we are about to do the 32-bit PTE generation.
4243;
4244; The following is the R14:R15 pair that contains the "shifted" VSID:
4245;
4246; 1 2 3 4 4 5 6
4247; 0 8 6 4 2 0 8 6 3
4248; +--------+--------+--------+--------+--------+--------+--------+--------+
4249; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4250; +--------+--------+--------+--------+--------+--------+--------+--------+
4251;
4252; The 24 bits of the 32-bit architecture VSID is in the following:
4253;
4254; 1 2 3 4 4 5 6
4255; 0 8 6 4 2 0 8 6 3
4256; +--------+--------+--------+--------+--------+--------+--------+--------+
4257; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4258; +--------+--------+--------+--------+--------+--------+--------+--------+
4259;
4260
4261
4262hpfBldPTE32:
4263 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4264 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4265
4266 mfsdr1 r27 ; Get the hash table base address
4267
4268 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4269 rlwinm r18,r23,10,26,31 ; Extract the API
4270 xor r19,r15,r0 ; Calculate hash << 12
4271 mr r2,r25 ; Save the flag part of the mapping
4272 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4273 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4274 rlwinm r25,r25,0,0,19 ; Clear out the flags
4275 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4276 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4277 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4278 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4279 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4280 add r24,r24,r25 ; Adjust to true physical address
4281 rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image
4282 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4283 and r19,r19,r16 ; Wrap hash table offset into the hash table
4284 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4285 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4286 add r19,r19,r27 ; Point to the PTEG
4287 subfic r20,r20,-4 ; Get negative offset to PCA
4288 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4289 add r20,r20,r27 ; Point to the PCA slot
4290
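;
; For reference, the hash and PTEG address computed above follow the classic
; 32-bit PowerPC MMU scheme; a hedged C rendering (field layout per the
; architecture, not lifted from the register dance in this file):
;
;     #include <stdint.h>
;
;     uint32_t pteg_addr(uint32_t sdr1, uint32_t vsid, uint32_t ea) {
;         uint32_t htaborg  = sdr1 & 0xFFFF0000u;        /* table base      */
;         uint32_t htabmask = sdr1 & 0x1FFu;             /* 9-bit size mask  */
;         uint32_t pgidx    = (ea >> 12) & 0xFFFFu;      /* EA page index   */
;         uint32_t hash     = (vsid & 0x7FFFFu) ^ pgidx; /* primary hash    */
;         uint32_t mask     = (htabmask << 10) | 0x3FFu; /* wrap to size    */
;         return htaborg | ((hash & mask) << 6);         /* 64-byte PTEGs   */
;     }
;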
4291;
4292; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4293; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4294;
4295; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4296; that some other processor beat us and stuck in a PTE or that
4297; all we had was a simple segment exception and the PTE was there the whole time.
4298; If we find a pointer, we are done.
4299;
4300
4301 mr r7,r20 ; Copy the PCA pointer
4302 bl mapLockPteg ; Lock the PTEG
4303
4304 lwz r12,mpPte(r31) ; Get the offset to the PTE
4305 mr r17,r6 ; Remember the PCA image
4306 mr r16,r6 ; Prime the post-select PCA image
4307 andi. r0,r12,mpHValid ; Is there a PTE here already?
4308 li r21,8 ; Get the number of slots
4309
4310 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4311
4312 bne- hpfBailOut ; Someone already did this for us...
4313
4314;
4315; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4316; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4317; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4318; R4 returns the slot index.
4319;
4320; REMEMBER: CR7 indicates that we are building a block mapping.
4321;
4322
4323hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4324 mr r6,r17 ; Get back the original PCA
4325 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4326 blt- hpfBailOut ; Holy Cow, all slots are locked...
4327
4328 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4329
4330 cmplwi cr5,r3,1 ; Did we steal a slot?
4331 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4332 mr r16,r6 ; Remember the PCA image after selection
4333 blt+ cr5,hpfInser32 ; Nope, no steal...
4334
4335 lwz r6,0(r19) ; Get the old PTE
4336 lwz r7,4(r19) ; Get the real part of the stealee
4337 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4338 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4339 srwi r3,r7,12 ; Change phys address to a ppnum
4340 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4341 cmplwi cr1,r3,0 ; Check if this is in RAM
4342 bne- hpfNoPte32 ; Could not get it, try for another...
4343
4344 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4345
4346hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4347
4348 sync ; Make sure the invalid is stored
4349 li r9,tlbieLock ; Get the TLBIE lock
4350 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4351
4352hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4353 mfsprg r4,0 ; Get the per_proc
4354 rlwinm r8,r6,25,18,31 ; Extract the space ID
4355 rlwinm r11,r6,25,18,31 ; Extract the space ID
4356 lwz r7,hwSteals(r4) ; Get the steal count
4357 srwi r2,r6,7 ; Align segment number with hash
4358 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4359 mr. r0,r0 ; Is it locked?
4360 srwi r0,r19,6 ; Align PTEG offset for back hash
4361 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4362 xor r11,r11,r0 ; Hash backwards to partial vaddr
4363 rlwinm r12,r2,14,0,3 ; Shift segment up
4364 mfsprg r2,2 ; Get feature flags
4365 li r0,1 ; Get our lock word
4366 rlwimi r12,r6,22,4,9 ; Move up the API
4367 bne- hpfTLBIE32 ; It is locked, go wait...
4368 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4369
4370 stwcx. r0,0,r9 ; Try to get it
4371 bne- hpfTLBIE32 ; We was beat...
4372 addi r7,r7,1 ; Bump the steal count
4373
4374 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4375 li r0,0 ; Lock clear value
4376
4377 tlbie r12 ; Invalidate it everywhere
4378
4379
4380 beq- hpfNoTS32 ; Can not have MP on this machine...
4381
4382 eieio ; Make sure that the tlbie happens first
4383 tlbsync ; Wait for everyone to catch up
4384 sync ; Make sure of it all
4385
4386hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4387
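;
; The lock/invalidate/sync dance above can be modeled in C like this (the
; ppc_* externs are illustrative stand-ins for the tlbie, eieio, tlbsync,
; and sync instructions, not real kernel calls):
;
;     #include <stdatomic.h>
;     #include <stdbool.h>
;     #include <stdint.h>
;
;     extern void ppc_tlbie(uint32_t va);
;     extern void ppc_eieio(void);
;     extern void ppc_tlbsync(void);
;     extern void ppc_sync(void);
;
;     static atomic_flag tlbie_lock = ATOMIC_FLAG_INIT;
;
;     void invalidate_tlb_entry(uint32_t va, bool smp) {
;         while (atomic_flag_test_and_set(&tlbie_lock))
;             ;                     /* only one tlbie may be in flight */
;         ppc_tlbie(va);            /* broadcast the invalidate        */
;         if (smp) {
;             ppc_eieio();          /* order tlbie ahead of tlbsync    */
;             ppc_tlbsync();        /* wait for the other processors   */
;             ppc_sync();           /* make it all visible             */
;         }
;         atomic_flag_clear(&tlbie_lock);
;     }
;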
4388 stw r7,hwSteals(r4) ; Save the steal count
4389 bgt cr5,hpfInser32 ; We just stole a block mapping...
4390
4391 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4392
4393 la r11,ppLink+4(r3) ; Point to the master RC copy
4394 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4395 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4396
4397hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4398 or r0,r0,r2 ; Merge in the new RC
4399 stwcx. r0,0,r11 ; Try to stick it back
4400 bne- hpfMrgRC32 ; Try again if we collided...
4401
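;
; The hpfMrgRC32 loop above is just an atomic OR into the master RC word;
; in C11 terms (a sketch, not the kernel's code):
;
;     #include <stdatomic.h>
;     #include <stdint.h>
;
;     static inline void merge_rc(_Atomic uint32_t *master, uint32_t rc) {
;         atomic_fetch_or(master, rc);  /* lwarx/stwcx. retry is implicit */
;     }
;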
4402
4403hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4404 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4405
4406 lhz r10,mpSpace(r7) ; Get the space
4407 lwz r9,mpVAddr+4(r7) ; And the vaddr
4408 cmplw cr1,r10,r8 ; Is this one of ours?
4409 xor r9,r12,r9 ; Compare virtual address
4410 cmplwi r9,0x1000 ; See if we really match
4411 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4412 beq+ hpfFPnch2 ; Yes, found ours...
4413
4414 lwz r7,mpAlias+4(r7) ; Chain on to the next
4415 b hpfFPnch ; Check it out...
4416
4417hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4418 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4419 bl mapPhysUnlock ; Unlock the physent now
4420
4421hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4422
4423 stw r24,4(r19) ; Stuff in the real part of the PTE
4424 eieio ; Make sure this gets there first
4425
4426 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4427 mr r17,r16 ; Get the PCA image to save
4428 b hpfFinish ; Go join the common exit code...
4429
4430
4431;
4432; At this point we are about to do the 64-bit PTE generation.
4433;
4434; The following is the R14:R15 pair that contains the "shifted" VSID:
4435;
4436; 0 0 1 2 3 4 4 5 6
4437; 0 8 6 4 2 0 8 6 3
4438; +--------+--------+--------+--------+--------+--------+--------+--------+
4439; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4440; +--------+--------+--------+--------+--------+--------+--------+--------+
4441;
4442;
4443
4444 .align 5
4445
4446hpfBldPTE64:
4447 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4448 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4449
4450 mfsdr1 r27 ; Get the hash table base address
4451
4452 sldi r11,r22,32 ; Slide top of adjusted EA over
4453 sldi r14,r14,32 ; Slide top of VSID over
4454 rlwinm r5,r27,0,27,31 ; Isolate the size
4455 eqv r16,r16,r16 ; Get all foxes here
4456 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4457 mr r2,r10 ; Save the flag part of the mapping
4458 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4459 rldicr r27,r27,0,45 ; Clean up the hash table base
4460 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4461 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4462 subfic r5,r5,46 ; Get number of leading zeros
4463 xor r19,r0,r15 ; Calculate hash
4464 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4465 srd r16,r16,r5 ; Shift over to get length of table
4466 srdi r19,r19,5 ; Convert page offset to hash table offset
4467 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4468 rldicr r10,r10,0,51 ; Clear out flags
4469 sldi r24,r24,12 ; Change ppnum to physical address
4470 sub r11,r11,r10 ; Get the offset from the base mapping
4471 and r19,r19,r16 ; Wrap into hash table
4472 add r24,r24,r11 ; Get actual physical address of this page
4473 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4474 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4475 subfic r20,r20,-4 ; Get negative offset to PCA
4476 ori r24,r24,lo16(mpR) ; Force on the reference bit
4477 add r20,r20,r27 ; Point to the PCA slot
4478 add r19,r19,r27 ; Point to the PTEG
4479
4480;
4481; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4482; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4483;
4484; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4485; that some other processor beat us and stuck in a PTE or that
4486; all we had was a simple segment exception and the PTE was there the whole time.
4487; If we find a pointer, we are done.
4488;
4489
4490 mr r7,r20 ; Copy the PCA pointer
4491 bl mapLockPteg ; Lock the PTEG
4492
4493 lwz r12,mpPte(r31) ; Get the offset to the PTE
4494 mr r17,r6 ; Remember the PCA image
4495 mr r18,r6 ; Prime post-selection PCA image
4496 andi. r0,r12,mpHValid ; See if we have a PTE now
4497 li r21,8 ; Get the number of slots
4498
4499 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4500
4501 bne-- hpfBailOut ; Someone already did this for us...
4502
4503;
4504; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4505; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4506; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4507; R4 returns the slot index.
4508;
4509; REMEMBER: CR7 indicates that we are building a block mapping.
4510;
4511
4512hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4513 mr r6,r17 ; Restore original state of PCA
4514 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4515 blt- hpfBailOut ; Holy Cow, all slots are locked...
4516
4517 bl mapSelSlot ; Go select a slot
4518
4519 cmplwi cr5,r3,1 ; Did we steal a slot?
4520 mr r18,r6 ; Remember the PCA image after selection
4521 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4522 lwz r10,hwSteals(r2) ; Get the steal count
4523 blt++ cr5,hpfInser64 ; Nope, no steal...
4524
4525 ld r6,0(r19) ; Get the old PTE
4526 ld r7,8(r19) ; Get the real part of the stealee
4527 rldicr r6,r6,0,62 ; Clear the valid bit
4528 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4529 srdi r3,r7,12 ; Change physical address to a ppnum
4530 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4531 cmplwi cr1,r3,0 ; Check if this is in RAM
4532 bne-- hpfNoPte64 ; Could not get it, try for another...
4533
4534 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4535
4536hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4537 li r9,tlbieLock ; Get the TLBIE lock
4538
4539 srdi r11,r6,5 ; Shift VSID over for back hash
4540 mfsprg r4,0 ; Get the per_proc
4541 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4542 sync ; Make sure the invalid is stored
4543
4544 sldi r12,r6,16 ; Move AVPN to EA position
4545 sldi r11,r11,5 ; Move this to the page position
4546
4547hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4548 mr. r0,r0 ; Is it locked?
4549 li r0,1 ; Get our lock word
4550 bne-- hpfTLBIE65 ; It is locked, go wait...
4551
4552 stwcx. r0,0,r9 ; Try to get it
4553 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4554 rldicl r8,r6,52,50 ; Isolate the address space ID
4555 bne-- hpfTLBIE64 ; We was beat...
4556 addi r10,r10,1 ; Bump the steal count
4557
4558 rldicl r11,r12,0,16 ; Clear cause the book says so
4559 li r0,0 ; Lock clear value
4560
4561 tlbie r11 ; Invalidate it everywhere
4562
4563 mr r7,r8 ; Get a copy of the space ID
4564 eieio ; Make sure that the tlbie happens first
4565 rldimi r7,r7,14,36 ; Copy address space to make hash value
4566 tlbsync ; Wait for everyone to catch up
4567 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4568 srdi r2,r6,26 ; Shift original segment down to bottom
4569
4570 ptesync ; Make sure of it all
4571 xor r7,r7,r2 ; Compute original segment
4572 stw r0,tlbieLock(0) ; Clear the tlbie lock
4573
4574 stw r10,hwSteals(r4) ; Save the steal count
4575 bgt cr5,hpfInser64 ; We just stole a block mapping...
4576
4577 rldimi r12,r7,28,0 ; Insert decoded segment
4578 rldicl r4,r12,0,13 ; Trim to max supported address
4579
4580 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4581
4582 la r11,ppLink+4(r3) ; Point to the master RC copy
4583 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4584 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4585
4586hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4587 li r12,ppLFAmask ; Get mask to clean up alias pointer
4588 or r0,r0,r2 ; Merge in the new RC
4589 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4590 stwcx. r0,0,r11 ; Try to stick it back
4591 bne-- hpfMrgRC64 ; Try again if we collided...
4592
4593hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4594 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4595
4596 lhz r10,mpSpace(r7) ; Get the space
4597 ld r9,mpVAddr(r7) ; And the vaddr
4598 cmplw cr1,r10,r8 ; Is this one of ours?
4599 xor r9,r4,r9 ; Compare virtual address
4600 cmpldi r9,0x1000 ; See if we really match
4601 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4602 beq++ hpfFPnch2x ; Yes, found ours...
4603
4604 ld r7,mpAlias(r7) ; Chain on to the next
4605 b hpfFPnchx ; Check it out...
4606
4607 .align 5
4608
4609hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4610 stwcx. r7,0,r7 ; Kill reservation
4611
4612hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4613 mr. r0,r0 ; Is it locked?
4614 beq++ hpfTLBIE64 ; It is free now, go try to grab it...
4615 b hpfTLBIE63 ; Still locked, keep checking...
4616
4617
4618
4619hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4620 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4621 bl mapPhysUnlock ; Unlock the physent now
4622
4623
4624hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4625 eieio ; Make sure this gets there first
4626 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4627 mr r17,r18 ; Get the PCA image to set
4628 b hpfFinish ; Go join the common exit code...
4629
4630hpfLostPhys:
4631 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4632 ori r0,r0,lo16(Choke) ; System abend
4633 sc
4634
4635;
4636; This is the common code we execute when we are finished setting up the PTE.
4637;
4638
4639 .align 5
4640
4641hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4642 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4643 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4644 stw r4,mpPte(r31) ; Remember our PTE
4645
4646hpfBailOut: eieio ; Make sure all updates come first
4647 stw r17,0(r20) ; Unlock and set the final PCA
4648
4649;
4650; This is where we go if we have started processing the fault, but find that someone
4651; else has taken care of it.
4652;
4653
4654hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4655 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4656 sth r2,mpFlags+2(r31) ; Set it
4657
4658 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4659 bl sxlkUnlock ; Unlock the search list
4660
4661 li r11,T_IN_VAIN ; Say that it was handled
4662 b EXT(PFSExit) ; Leave...
4663
4664;
4665; This is where we go when we find that someone else
4666; is in the process of handling the fault.
4667;
4668
4669hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4670 stwcx. r3,0,r3 ; Do it
4671
4672 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4673 bl sxlkUnlock ; Unlock the search list
4674
4675 li r11,T_IN_VAIN ; Say that it was handled
4676 b EXT(PFSExit) ; Leave...
4677
4678;
4679; Guest shadow assist -- page fault handler
4680;
4681; Here we handle a fault in a guest pmap that has the guest shadow mapping
4682; assist active. We locate the VMM pmap extension block, which contains an
4683; index over the discontiguous multi-page shadow hash table. The index
4684; corresponding to our vaddr is selected, and the selected group within
4685; that page is searched for a valid and active entry that contains
4686; our vaddr and space id. The search is pipelined, so that we may fetch
4687; the next slot while examining the current slot for a hit. The final
4688; search iteration is unrolled so that we don't fetch beyond the end of
4689; our group, which could have dire consequences depending upon where the
4690; physical hash page is located.
4691;
4692; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4693; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4694; after the pmap_vmm_ext is the hash table physical address index, a
4695; linear list of 64-bit physical addresses of the pages that comprise
4696; the hash table.
4697;
4698; In the event that we successfully locate a guest mapping, we re-join
4699; the page fault path at hpfGVfound with the mapping's address in r31;
4700; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4701; a share of the pmap search lock for the host pmap with the host pmap's
4702; address in r28, the guest pmap's space id in r21, and the guest pmap's
4703; flags in r12.
4704;
4705
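;
; A hedged C outline of the index computation described above (the constants
; stand in for GV_HPAGE_*/GV_HGRP_* and are illustrative, not the real
; values):
;
;     #include <stdint.h>
;
;     #define HPAGES   4                  /* assumed hash table pages  */
;     #define GRP_SZ   128                /* assumed bytes per group   */
;     #define GRPS_PPG (4096 / GRP_SZ)    /* groups per 4KB hash page  */
;
;     uint64_t group_paddr(const uint64_t page_paddr[HPAGES],
;                          uint32_t space_id, uint64_t vaddr) {
;         uint32_t hash = space_id ^ (uint32_t)(vaddr >> 12);
;         uint32_t page = (hash / GRPS_PPG) % HPAGES;  /* pick hash page */
;         uint32_t grp  = hash % GRPS_PPG;             /* group in page  */
;         return page_paddr[page] + (uint64_t)grp * GRP_SZ;
;     }
;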
4706 .align 5
4707hpfGVxlate:
4708 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4709
4710 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4711 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4712 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4713 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4714 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4715 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4716 lwz r6,vxsGpf(r11) ; Get guest fault count
4717
4718 srwi r3,r10,12 ; Form shadow hash:
4719 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4720 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4721 ; Form index offset from hash page number
4722 add r31,r31,r4 ; r31 <- hash page index entry
4723 lwz r31,4(r31) ; r31 <- hash page paddr
4724 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4725 ; r31 <- hash group paddr
4726
4727 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4728 bl sxlkShared ; Go get a shared lock on the mapping lists
4729 mr. r3,r3 ; Did we get the lock?
4730 bne- hpfBadLock ; Nope...
4731
4732 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4733 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4734 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4735 addi r6,r6,1 ; Increment guest fault count
4736 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4737 mtctr r0 ; in this group
4738 stw r6,vxsGpf(r11) ; Update guest fault count
4739 b hpfGVlp32
4740
4741 .align 5
4742hpfGVlp32:
4743 mr r6,r3 ; r6 <- current mapping slot's flags
4744 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4745 mr r7,r4 ; r7 <- current mapping slot's space ID
4746 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4747 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4748 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4749 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4750 xor r7,r7,r21 ; Compare space ID
4751 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4752 xor r8,r8,r10 ; Compare virtual address
4753 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4754 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4755
4756 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4757 bdnz hpfGVlp32 ; Iterate
4758
4759 clrrwi r5,r5,12 ; Remove flags from virtual address
4760 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4761 xor r4,r4,r21 ; Compare space ID
4762 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4763 xor r5,r5,r10 ; Compare virtual address
4764 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4765 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4766
4767 b hpfGVmiss
4768
4769 .align 5
4770hpfGV64:
4771 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4772 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4773 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4774 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4775 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4776 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4777 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4778 lwz r6,vxsGpf(r11) ; Get guest fault count
4779
4780 srwi r3,r10,12 ; Form shadow hash:
4781 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4782 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4783 ; Form index offset from hash page number
4784 add r31,r31,r4 ; r31 <- hash page index entry
4785 ld r31,0(r31) ; r31 <- hash page paddr
4786 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4787 ; r31 <- hash group paddr
4788
4789 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4790 bl sxlkShared ; Go get a shared lock on the mapping lists
4791 mr. r3,r3 ; Did we get the lock?
4792 bne-- hpfBadLock ; Nope...
4793
4794 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4795 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4796 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4797 addi r6,r6,1 ; Increment guest fault count
4798 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4799 mtctr r0 ; in this group
4800 stw r6,vxsGpf(r11) ; Update guest fault count
4801 b hpfGVlp64
4802
4803 .align 5
4804hpfGVlp64:
4805 mr r6,r3 ; r6 <- current mapping slot's flags
4806 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4807 mr r7,r4 ; r7 <- current mapping slot's space ID
4808 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4809 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4810 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4811 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4812 xor r7,r7,r21 ; Compare space ID
4813 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4814 xor r8,r8,r10 ; Compare virtual address
4815 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4816 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4817
4818 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4819 bdnz hpfGVlp64 ; Iterate
4820
4821 clrrdi r5,r5,12 ; Remove flags from virtual address
4822 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4823 xor r4,r4,r21 ; Compare space ID
4824 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4825 xor r5,r5,r10 ; Compare virtual address
4826 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4827 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4828
4829hpfGVmiss:
4830 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4831 addi r6,r6,1 ; Increment miss count
4832 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
4833 b hpfNotFound
4834
4835/*
4836 * hw_set_user_space(pmap)
4837 * hw_set_user_space_dis(pmap)
4838 *
4839 * Indicate whether memory space needs to be switched.
4840 * We really need to turn off interrupts here, because we need to be non-preemptable
4841 *
4842 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4843 * register usage here. The VMM switch code in vmachmon.s that calls this
4844 * knows what registers are in use. Check there if these change.
4845 */
4846
4847
4848
4849 .align 5
4850 .globl EXT(hw_set_user_space)
4851
4852LEXT(hw_set_user_space)
4853
4854 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4855 mfmsr r10 ; Get the current MSR
4856 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4857 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4858 andc r10,r10,r8 ; Turn off VEC, FP for good
4859 andc r9,r10,r9 ; Turn off EE also
4860 mtmsr r9 ; Disable them
4861 isync ; Make sure FP and vec are off
4862 mfsprg r6,1 ; Get the current activation
4863 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4864 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4865 mfsprg r4,2 ; Get the feature flags
4866 lwz r7,pmapvr(r3) ; Get the v to r translation
4867 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4868 mtcrf 0x80,r4 ; Get the Altivec flag
4869 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4870 cmplw cr1,r3,r2 ; Same address space as before?
4871 stw r7,ppUserPmap(r6) ; Show our real pmap address
4872 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4873 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4874 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4875 mtmsr r10 ; Restore interruptions
4876 beqlr-- cr1 ; Leave if the same address space or not Altivec
4877
4878 dssall ; Need to kill all data streams if adrsp changed
4879 sync
4880 blr ; Return...
4881
4882 .align 5
4883 .globl EXT(hw_set_user_space_dis)
4884
4885LEXT(hw_set_user_space_dis)
4886
4887 lwz r7,pmapvr(r3) ; Get the v to r translation
4888 mfsprg r4,2 ; Get the feature flags
4889 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4890 mfsprg r6,1 ; Get the current activation
4891 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4892 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4893 mtcrf 0x80,r4 ; Get the Altivec flag
4894 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4895 cmplw cr1,r3,r2 ; Same address space as before?
4896 stw r7,ppUserPmap(r6) ; Show our real pmap address
4897 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4898 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4899 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4900 beqlr-- cr1 ; Leave if the same
4901
4902 dssall ; Need to kill all data streams if adrsp changed
4903 sync
4904 blr ; Return...
4905
4906/* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4907 *
4908 * Lock must already be held on mapping block list
4909 * returns 0 if all slots filled.
4910 * returns n if a slot is found and it is not the last
4911 * returns -n if a slot is found and it is the last
4912 * when n and -n are returned, the corresponding bit is cleared
4913 * the mapping is zeroed out before return
4914 *
4915 */
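;
; In C, the search amounts to a count-leading-zeros over the 64-bit free
; mask (a model only; slot 0 holds the block header, so its bit is assumed
; never to be set):
;
;     #include <stdint.h>
;
;     int mapalc1_model(uint64_t *free_mask) {
;         if (*free_mask == 0) return 0;            /* all slots filled  */
;         int n = __builtin_clzll(*free_mask);      /* first free slot   */
;         *free_mask &= ~(0x8000000000000000ull >> n);
;         return (*free_mask == 0) ? -n : n;        /* -n if it was last */
;     }
;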
4916
4917 .align 5
4918 .globl EXT(mapalc1)
4919
4920LEXT(mapalc1)
4921 lwz r4,mbfree(r3) ; Get the 1st mask
4922 lis r0,0x8000 ; Get the mask to clear the first free bit
4923 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4924 mr r12,r3 ; Save the block ptr
4925 cntlzw r3,r4 ; Get first 1-bit in 1st word
4926 srw. r9,r0,r3 ; Get bit corresponding to first free one
4927 cntlzw r10,r5 ; Get first free field in second word
4928 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4929 bne mapalc1f ; Found one in 1st word
4930
4931 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4932 li r3,0 ; assume failure return
4933 andc r5,r5,r9 ; Turn it off
4934 beqlr-- ; There are no 1 bits left...
4935 addi r3,r10,32 ; set the correct number
4936
4937mapalc1f:
4938 or. r0,r4,r5 ; any more bits set?
4939 stw r4,mbfree(r12) ; update bitmasks
4940 stw r5,mbfree+4(r12)
4941
4942 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4943 addi r7,r6,32
4944 dcbz r6,r12 ; clear the 64-byte mapping
4945 dcbz r7,r12
4946
4947 bnelr++ ; return if another bit remains set
4948
4949 neg r3,r3 ; indicate we just returned the last bit
4950 blr
4951
4952
4953/* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4954 *
4955 * Lock must already be held on mapping block list
4956 * returns 0 if all slots filled.
4957 * returns n if a slot is found and it is not the last
4958 * returns -n if a slot is found and it is the last
4959 * when n and -n are returned, the corresponding bits are cleared
4960 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4961 * the mapping is zeroed out before return
4962 */
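;
; The cntlzw(n & (n<<1)) trick, modeled in C over the full 64-bit mask
; (a sketch; the assembly handles the run spanning the two 32-bit words
; as a special case, which the 64-bit view below collapses for free):
;
;     #include <stdint.h>
;
;     int first_double_run(uint64_t free_mask) {
;         uint64_t runs = free_mask & (free_mask << 1);
;         /* bit i of runs set (MSB = bit 0) => bits i and i+1 both free */
;         if (runs == 0) return -1;           /* no adjacent free pair   */
;         return __builtin_clzll(runs);       /* index of first pair     */
;     }
;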
4963
4964 .align 5
4965 .globl EXT(mapalc2)
4966LEXT(mapalc2)
4967 lwz r4,mbfree(r3) ; Get the first mask
4968 lis r0,0x8000 ; Get the mask to clear the first free bit
4969 lwz r5,mbfree+4(r3) ; Get the second mask
4970 mr r12,r3 ; Save the block ptr
4971 slwi r6,r4,1 ; shift first word over
4972 and r6,r4,r6 ; lite start of double bit runs in 1st word
4973 slwi r7,r5,1 ; shift 2nd word over
4974 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4975 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4976 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4977 cntlzw r10,r7 ; Get first free field in second word
4978 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4979 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4980 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4981 bne mapalc2a ; Found two consecutive free bits in 1st word
4982
4983 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4984 li r3,0 ; assume failure
4985 srwi r11,r9,1 ; get mask for 2nd bit
4986 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4987 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4988 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4989 addi r3,r10,32 ; set the correct number
4990
4991mapalc2a:
4992 or. r0,r4,r5 ; any more bits set?
4993 stw r4,mbfree(r12) ; update bitmasks
4994 stw r5,mbfree+4(r12)
4995 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4996 addi r7,r6,32
4997 addi r8,r6,64
4998 addi r9,r6,96
4999 dcbz r6,r12 ; zero out the 128-byte mapping
5000 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
5001 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
5002 dcbz r9,r12
5003
5004 bnelr++ ; return if another bit remains set
5005
5006 neg r3,r3 ; indicate we just returned the last bit
5007 blr
5008
5009mapalc2c:
5010 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5011 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5012 beqlr ; no, we failed
5013 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5014 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5015 li r3,31 ; get index of this field
5016 b mapalc2a
5017
5018
5019;
5020; This routine initializes the hash table and PCA.
5021; It is done here because we may need to be 64-bit to do it.
5022;
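;
; A hedged C model of what follows: clear every PTEG, then stamp each PCA
; entry (one word per PTEG, growing downward from the table base) with
; "all slots free, steal from the end", i.e. 0xFF010000:
;
;     #include <stdint.h>
;     #include <string.h>
;
;     void hash_init_model(uint8_t *base, uint32_t size, uint32_t pteg_sz) {
;         memset(base, 0, size);                  /* clear all PTEGs    */
;         uint32_t *pca = (uint32_t *)base;       /* PCA sits below base */
;         uint32_t n = size / pteg_sz;            /* one entry per PTEG */
;         for (uint32_t i = 1; i <= n; i++)
;             pca[-(int32_t)i] = 0xFF010000u;     /* free mask + steal  */
;     }
;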
5023
5024 .align 5
5025 .globl EXT(hw_hash_init)
5026
5027LEXT(hw_hash_init)
5028
5029 mfsprg r10,2 ; Get feature flags
5030 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5031 mtcrf 0x02,r10 ; move pf64Bit to cr6
5032 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5033 lis r4,0xFF01 ; Set all slots free and start steal at end
5034 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5035 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5036
5037 lwz r12,0(r12) ; Get hash table size
5038 li r3,0 ; Get start
5039 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5040
5041 lwz r11,4(r11) ; Get hash table base
5042
5043hhiNext32: cmplw r3,r12 ; Have we reached the end?
5044 bge- hhiCPCA32 ; Yes...
5045 dcbz r3,r11 ; Clear the line
5046 addi r3,r3,32 ; Next one...
5047 b hhiNext32 ; Go on...
5048
5049hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5050 li r3,-4 ; Displacement to first PCA entry
5051 neg r12,r12 ; Get negative end of PCA
5052
5053hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5054 subi r3,r3,4 ; Next slot
5055 cmpw r3,r12 ; Have we finished?
5056 bge+ hhiNPCA32 ; Not yet...
5057 blr ; Leave...
5058
5059hhiSF: mfmsr r9 ; Save the MSR
5060 li r8,1 ; Get a 1
5061 mr r0,r9 ; Get a copy of the MSR
5062 ld r11,0(r11) ; Get hash table base
5063 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5064 mtmsrd r0 ; Turn on SF
5065 isync
5066
5067
5068hhiNext64: cmpld r3,r12 ; Have we reached the end?
5069 bge-- hhiCPCA64 ; Yes...
5070 dcbz128 r3,r11 ; Clear the line
5071 addi r3,r3,128 ; Next one...
5072 b hhiNext64 ; Go on...
5073
5074hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5075 li r3,-4 ; Displacement to first PCA entry
5076 neg r12,r12 ; Get negative end of PCA
5077
5078hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5079 subi r3,r3,4 ; Next slot
5080 cmpd r3,r12 ; Have we finished?
5081 bge++ hhiNPCA64 ; Not yet...
5082
5083 mtmsrd r9 ; Turn off SF if it was off
5084 isync
5085 blr ; Leave...
5086
5087
5088;
5089; This routine sets up the hardware to start translation.
5090; Note that we do NOT start translation.
5091;
5092
5093 .align 5
5094 .globl EXT(hw_setup_trans)
5095
5096LEXT(hw_setup_trans)
5097
5098 mfsprg r11,0 ; Get the per_proc block
5099 mfsprg r12,2 ; Get feature flags
5100 li r0,0 ; Get a 0
5101 li r2,1 ; And a 1
5102 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5103 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5104 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5105 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5106 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5107
5108 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5109
5110 li r9,0 ; Clear out a register
5111 sync
5112 isync
5113 mtdbatu 0,r9 ; Invalidate maps
5114 mtdbatl 0,r9 ; Invalidate maps
5115 mtdbatu 1,r9 ; Invalidate maps
5116 mtdbatl 1,r9 ; Invalidate maps
5117 mtdbatu 2,r9 ; Invalidate maps
5118 mtdbatl 2,r9 ; Invalidate maps
5119 mtdbatu 3,r9 ; Invalidate maps
5120 mtdbatl 3,r9 ; Invalidate maps
5121
5122 mtibatu 0,r9 ; Invalidate maps
5123 mtibatl 0,r9 ; Invalidate maps
5124 mtibatu 1,r9 ; Invalidate maps
5125 mtibatl 1,r9 ; Invalidate maps
5126 mtibatu 2,r9 ; Invalidate maps
5127 mtibatl 2,r9 ; Invalidate maps
5128 mtibatu 3,r9 ; Invalidate maps
5129 mtibatl 3,r9 ; Invalidate maps
5130
5131 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5132 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5133 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5134 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5135 lwz r11,4(r11) ; Get hash table base
5136 lwz r12,0(r12) ; Get hash table size
5137 subi r12,r12,1 ; Back off by 1
5138 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5139
5140 mtsdr1 r11 ; Ok, we now have the hash table set up
5141 sync
5142
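;
; The 32-bit SDR1 image built above is, in C terms (assuming the size is a
; power of two and at least 64KB):
;
;     #include <stdint.h>
;
;     uint32_t make_sdr1(uint32_t htab_base, uint32_t htab_size) {
;         uint32_t htabmask = (htab_size >> 16) - 1;   /* 9-bit size mask */
;         return (htab_base & 0xFFFF0000u) | (htabmask & 0x1FFu);
;     }
;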
5143 li r12,invalSpace ; Get the invalid segment value
5144 li r10,0 ; Start low
5145
5146hstsetsr: mtsrin r12,r10 ; Set the SR
5147 addis r10,r10,0x1000 ; Bump the segment
5148 mr. r10,r10 ; Are we finished?
5149 bne+ hstsetsr ; Nope...
5150 sync
5151 blr ; Return...
5152
5153;
5154; 64-bit version
5155;
5156
5157hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5158 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5159 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5160 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5161 ld r11,0(r11) ; Get hash table base
5162 lwz r12,0(r12) ; Get hash table size
5163 cntlzw r10,r12 ; Get the number of bits
5164 subfic r10,r10,13 ; Get the extra bits we need
5165 or r11,r11,r10 ; Add the size field to SDR1
5166
5167 mtsdr1 r11 ; Ok, we now have the hash table set up
5168 sync
5169
5170 li r0,0 ; Set an SLB slot index of 0
5171 slbia ; Trash all SLB entries (except for entry 0 that is)
5172 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5173 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5174 slbie r7 ; Invalidate it
5175
5176 blr ; Return...
5177
5178
5179;
5180; This routine turns on translation for the first time on a processor
5181;
5182
5183 .align 5
5184 .globl EXT(hw_start_trans)
5185
5186LEXT(hw_start_trans)
5187
5188
5189 mfmsr r10 ; Get the msr
5190 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5191
5192 mtmsr r10 ; Everything falls apart here
5193 isync
5194
5195 blr ; Back to it.
5196
5197
5198
5199;
5200; This routine validates a segment register.
5201; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5202;
5203; r3 = virtual pmap
5204; r4 = segment[0:31]
5205; r5 = segment[32:63]
5206; r6 = va[0:31]
5207; r7 = va[32:63]
5208;
5209; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5210; Note that there is no reason to apply the key modifier here because this is only
5211; used for kernel accesses.
5212;
5213
5214 .align 5
5215 .globl EXT(hw_map_seg)
5216
5217LEXT(hw_map_seg)
5218
5219 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5220 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5221 mfsprg r10,2 ; Get feature flags
5222
5223;
5224; Note: the following code would probably be easier to follow if I split it,
5225; but I just wanted to see if I could write this to work on both 32- and 64-bit
5226; machines combined.
5227;
5228
5229;
5230; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5231; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5232
5233 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5234 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5235 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5236 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5237 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5238 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5239 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5240 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5241 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5242 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5243
5244 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5245 ; concatenated together. There is garbage
5246 ; at the top for 64-bit but we will clean
5247 ; that out later.
5248 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5249
5250
5251;
5252; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5253; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5254;
5255
5256;
5257; What we have now is:
5258;
5259; 0 0 1 2 3 4 4 5 6
5260; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5261; +--------+--------+--------+--------+--------+--------+--------+--------+
5262; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5263; +--------+--------+--------+--------+--------+--------+--------+--------+
5264; 0 0 1 2 3 - for 32-bit machines
5265; 0 8 6 4 1
5266;
5267; 0 0 1 2 3 4 4 5 6
5268; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5269; +--------+--------+--------+--------+--------+--------+--------+--------+
5270; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5271; +--------+--------+--------+--------+--------+--------+--------+--------+
5272; 0 0 1 2 3 - for 32-bit machines
5273; 0 8 6 4 1
5274;
5275; 0 0 1 2 3 4 4 5 6
5276; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5277; +--------+--------+--------+--------+--------+--------+--------+--------+
5278; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5279; +--------+--------+--------+--------+--------+--------+--------+--------+
5280; 0 0 1 2 3 - for 32-bit machines
5281; 0 8 6 4 1
5282
5283
5284 xor r8,r8,r2 ; Calculate VSID
5285
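;
; A hedged C rendering of the VSID calculation above: replicated copies of
; the space ID, spaced 14 bits apart, XORed with the segment-aligned EA
; (widths and shifts are illustrative):
;
;     #include <stdint.h>
;
;     uint64_t make_vsid(uint32_t space_id, uint64_t ea) {
;         uint64_t sid  = space_id & 0xFFFFFu;              /* space ID   */
;         uint64_t hash = sid | (sid << 14) | (sid << 28);  /* SID copies */
;         return (hash ^ (ea >> 28)) & 0x000FFFFFFFFFFFFFull;
;     }
;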
5286 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5287 mfsprg r12,0 ; Get the per_proc
5288 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5289 mfmsr r6 ; Get current MSR
5290 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5291 mtmsrd r0,1 ; Set only the EE bit to 0
5292 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5293 mfmsr r11 ; Get the MSR right now, after disabling EE
5294 andc r2,r11,r2 ; Turn off translation now
5295 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5296 or r11,r11,r6 ; Turn on the EE bit if it was on
5297 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5298 isync ; Hang out a bit
5299
5300 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5301 sldi r9,r9,9 ; Position the key and noex bit
5302
5303 rldimi r5,r8,12,0 ; Form the VSID/key
5304
5305 not r3,r6 ; Make valids be 0s
5306
5307 cntlzd r7,r3 ; Find a free SLB
5308 cmplwi r7,63 ; Did we find a free SLB entry?
5309
5310 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5311
5312 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5313 addi r7,r7,1 ; Make sure we skip slb 0
5314 blt++ hmsFreeSeg ; Yes, go load it...
5315
5316;
5317; No free SLB entries, select one that is in use and invalidate it
5318;
5319 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5320 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5321 addi r2,r2,1 ; Set next slot to steal
5322 slbmfee r3,r7 ; Get the entry that is in the selected spot
5323 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5324 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5325 srawi r8,r8,31 ; Get -1 if steal index still in range
5326 slbie r3 ; Invalidate the in-use SLB entry
5327 and r2,r2,r8 ; Reset steal index when it should wrap
5328 isync ;
5329
5330 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5331;
5332; We are now ready to stick the SLB entry in the SLB and mark it in use
5333;
5334
5335hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5336 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5337 srd r0,r0,r2 ; Set bit mask for allocation
5338 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5339 or r6,r6,r0 ; Turn on the allocation flag
5340
5341 slbmte r5,r4 ; Make that SLB entry
5342
5343 std r6,validSegs(r12) ; Mark as valid
5344 mtmsrd r11 ; Restore the MSR
5345 isync
5346 blr ; Back to it...
5347
5348 .align 5
5349
5350hms32bit:
5351 mfsprg r12,1 ; Get the current activation
5352 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5353 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5354 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5355 lis r0,0x8000 ; Set bit 0
5356 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5357 srw r0,r0,r2 ; Get bit corresponding to SR
5358 addi r7,r12,validSegs ; Point to the valid segment flags directly
5359
5360 mtsrin r8,r4 ; Set the actual SR
5361 isync ; Need to make sure this is done
5362
5363hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5364 or r6,r6,r0 ; Show that SR is valid
5365 stwcx. r6,0,r7 ; Set the valid SR flags
5366 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5367
5368 blr ; Back to it...
5369
5370
5371;
5372; This routine invalidates a segment register.
5373;
5374
5375 .align 5
5376 .globl EXT(hw_blow_seg)
5377
5378LEXT(hw_blow_seg)
5379
5380 mfsprg r10,2 ; Get feature flags
5381 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5382
5383 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5384
5385 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5386
5387 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5388 mfmsr r6 ; Get current MSR
5389 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5390 mtmsrd r0,1 ; Set only the EE bit to 0
5391 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5392 mfmsr r11 ; Get the MSR right now, after disabling EE
5393 andc r2,r11,r2 ; Turn off translation now
5394 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5395 or r11,r11,r6 ; Turn on the EE bit if it was on
5396 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5397 isync ; Hang out a bit
5398
5399 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5400
5401 slbie r9 ; Invalidate the associated SLB entry
5402
5403 mtmsrd r11 ; Restore the MSR
5404 isync
5405 blr ; Back to it.
5406
5407 .align 5
5408
5409hbs32bit:
5410 mfsprg r12,1 ; Get the current activation
5411 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5412 addi r7,r12,validSegs ; Point to the valid segment flags directly
5413 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5414 rlwinm r6,r9,4,28,31 ; Convert segment to number
5415 lis r2,0x8000 ; Set up a mask
5416 srw r2,r2,r6 ; Make a mask
5417 and. r0,r4,r2 ; See if this is even valid
5418 li r5,invalSpace ; Set the invalid address space VSID
5419 beqlr ; Leave if already invalid...
5420
5421 mtsrin r5,r9 ; Slam the segment register
5422 isync ; Need to make sure this is done
5423
5424hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5425 stwcx. r4,0,r7 ; Set the valid SR flags
5426 beqlr++ ; Stored ok, no interrupt, time to leave...
5427
5428 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5429 b hbsrupt ; Try again...
5430
5431;
5432; This routine invalidates the entire pmap segment cache
5433;
5434; Translation is on, interrupts may or may not be enabled.
5435;
5436
5437 .align 5
5438 .globl EXT(invalidateSegs)
5439
5440LEXT(invalidateSegs)
5441
5442 la r10,pmapCCtl(r3) ; Point to the segment cache control
5443 eqv r2,r2,r2 ; Get all foxes
5444
5445isInv: lwarx r4,0,r10 ; Get the segment cache control value
5446 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5447 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5448 bne-- isInv0 ; Yes, try again...
5449
5450 stwcx. r4,0,r10 ; Try to invalidate it
5451 bne-- isInv ; Someone else just stuffed it...
5452 blr ; Leave...
5453
5454
5455isInv0: li r4,lgKillResv ; Get reservation kill zone
5456 stwcx. r4,0,r4 ; Kill reservation
5457
5458isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5459 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5460 bne-- isInv ; Nope...
5461 b isInv1 ; Still locked do it again...
5462
5463;
5464; This routine switches segment registers between kernel and user.
5465; We have some assumptions and rules:
5466; We are in the exception vectors
5467; pf64Bitb is set up
5468; R3 contains the MSR we are going to
5469; We can not use R4, R13, R20, R21, R29
5470; R13 is the savearea
5471; R29 has the per_proc
5472;
5473; We return R3 as 0 if we did not switch between kernel and user
5474; We also maintain and apply the user state key modifier used by VMM support;
5475; If we go to the kernel it is set to 0, otherwise it follows the bit
5476; in spcFlags.
5477;
5478
5479 .align 5
5480 .globl EXT(switchSegs)
5481
5482LEXT(switchSegs)
5483
5484 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5485 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5486 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5487 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5488 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5489 or r2,r2,r3 ; This will be 1 if we will be using user segments
5490 li r3,0 ; Get a selection mask
5491 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5492 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5493 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5494 la r19,ppUserPmap(r29) ; Point to the current user pmap
5495
5496; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5497 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5498
5499 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5500 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5501 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5502 or r8,r8,r19 ; Get the pointer to the pmap we are using
5503
5504 beqlr ; We are staying in the same mode, do not touch segs...
5505
5506 lwz r28,0(r8) ; Get top half of pmap address
5507 lwz r10,4(r8) ; Get bottom half
5508
5509 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5510 rlwinm r28,r28,0,1,0 ; Copy top to top
5511 stw r30,ppMapFlags(r29) ; Set the key modifier
5512 rlwimi r28,r10,0,0,31 ; Insert bottom
5513
5514 la r10,pmapCCtl(r28) ; Point to the segment cache control
5515 la r9,pmapSegCache(r28) ; Point to the segment cache
5516
5517ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5518 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5519 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5520 bne-- ssgLock0 ; Yup, this is in use...
5521
5522 stwcx. r16,0,r10 ; Try to set the lock
5523 bne-- ssgLock ; Did we get contention?
5524
5525 not r11,r15 ; Invert the invalids to valids
5526 li r17,0 ; Set a mask for the SRs we are loading
5527 isync ; Make sure we are all caught up
5528
5529 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5530
5531 li r0,0 ; Clear
5532 slbia ; Trash all SLB entries (except for entry 0 that is)
5533 li r17,1 ; Get SLB index to load (skip slb 0)
5534 oris r0,r0,0x8000 ; Get set for a mask
5535 b ssg64Enter ; Start on a cache line...
5536
5537 .align 5
5538
5539ssgLock0: li r15,lgKillResv ; Killing field
5540 stwcx. r15,0,r15 ; Kill reservation
5541
5542ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5543 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5544 beq++ ssgLock ; It is free now, go try to take it...
5545 b ssgLock1 ; Still locked, keep checking...
5546;
5547; This is the 32-bit address space switch code.
5548; We take a reservation on the segment cache and walk through.
5549; For each entry, we load the specified entries and remember which
5550; we did with a mask. Then, we figure out which segments should be
5551; invalid and then see which actually are. Then we load those with the
5552; defined invalid VSID.
5553; Afterwards, we unlock the segment cache.
5554;
5555
5556 .align 5
5557
5558ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5559 cmplwi r12,pmapSegCacheUse ; See if we are done
5560 slwi r14,r12,4 ; Index to the cache slot
5561 lis r0,0x8000 ; Get set for a mask
5562 add r14,r14,r9 ; Point to the entry
5563
5564 bge- ssg32Done ; All done...
5565
5566 lwz r5,sgcESID+4(r14) ; Get the ESID part
5567 srw r2,r0,r12 ; Form a mask for the one we are loading
5568 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5569
5570 andc r11,r11,r2 ; Clear the bit
5571 lwz r6,sgcVSID(r14) ; And get the VSID top
5572
5573 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
5574
5575 xor r7,r7,r30 ; Modify the key before we actually set it
5576 srw r0,r0,r2 ; Get a mask for the SR we are loading
5577 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5578 or r17,r17,r0 ; Remember the segment
5579 rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID
5580 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5581
5582 mtsrin r8,r5 ; Load the segment
5583 b ssg32Enter ; Go enter the next...
5584
5585 .align 5
5586
5587ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5588 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5589
5590 lis r0,0x8000 ; Get set for a mask
5591 li r2,invalSpace ; Set the invalid address space VSID
5592
5593 nop ; Align loop
5594 nop ; Align loop
5595 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5596 nop ; Align loop
5597
5598ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5599 cmplwi r18,16 ; Have we finished?
5600 srw r22,r0,r18 ; Get the mask bit
5601 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5602 andc r16,r16,r22 ; Get rid of the guy we just did
5603 bge ssg32Really ; Yes, we are really done now...
5604
5605 mtsrin r2,r23 ; Invalidate the SR
5606 b ssg32Inval ; Do the next...
5607
5608 .align 5
5609
5610ssg32Really:
5611 stw r17,validSegs(r29) ; Set the valid SR flags
5612 li r3,1 ; Set kernel/user transition
5613 blr
5614
5615;
5616; This is the 64-bit address space switch code.
5617; First we blow away all of the SLB entries.
5618; Then we walk through the segment cache,
5619; loading the SLB. Afterwards, we release the cache lock.
5620;
5621; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5622; It's a performance thing...
5623;
5624
5625 .align 5
5626
5627ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5628 cmplwi r12,pmapSegCacheUse ; See if we are done
5629 slwi r14,r12,4 ; Index to the cache slot
5630 srw r16,r0,r12 ; Form a mask for the one we are loading
5631 add r14,r14,r9 ; Point to the entry
5632 andc r11,r11,r16 ; Clear the bit
5633 bge-- ssg64Done ; All done...
5634
5635 ld r5,sgcESID(r14) ; Get the ESID part
5636 ld r6,sgcVSID(r14) ; And get the VSID part
5637 oris r5,r5,0x0800 ; Turn on the valid bit
5638 or r5,r5,r17 ; Insert the SLB slot
5639 xor r6,r6,r30 ; Modify the key before we actually set it
5640 addi r17,r17,1 ; Bump to the next slot
5641 slbmte r6,r5 ; Make that SLB entry
5642 b ssg64Enter ; Go enter the next...
5643
5644 .align 5
5645
5646ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5647
5648 eqv r16,r16,r16 ; Load up with all foxes
5649 subfic r17,r17,64 ; Get the number of 1 bits we need
5650
5651 sld r16,r16,r17 ; Get a mask for the used SLB entries
5652 li r3,1 ; Set kernel/user transition
5653 std r16,validSegs(r29) ; Set the valid SR flags
5654 blr
5655
5656;
5657; mapSetUp - this function sets initial state for all mapping functions.
5658; We turn off all translations (physical), disable interruptions, and
5659; enter 64-bit mode if applicable.
5660;
5661; We also return the original MSR in r11, the feature flags in R12,
5662; and CR6 set up so we can do easy branches for 64-bit
5663; hw_clear_maps assumes r10, r9 will not be trashed.
5664;
5665
5666 .align 5
5667 .globl EXT(mapSetUp)
5668
5669LEXT(mapSetUp)
5670
5671 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5672 mfsprg r12,2 ; Get feature flags
5673 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5674 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5675 mfmsr r11 ; Save the MSR
5676 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5677 andc r11,r11,r0 ; Clear VEC and FP for good
5678 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5679 li r2,1 ; Prepare for 64 bit
5680 andc r0,r11,r0 ; Clear the rest
5681 bt pfNoMSRirb,msuNoMSR ; No MSR...
5682 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5683
5684 mtmsr r0 ; Translation and all off
5685 isync ; Toss prefetch
5686 blr ; Return...
5687
5688 .align 5
5689
5690msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5691 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5692 isync ; synchronize
5693 blr ; Return...
5694
5695 .align 5
5696
5697msuNoMSR: mr r2,r3 ; Save R3 across call
5698 mr r3,r0 ; Get the new MSR value
5699 li r0,loadMSR ; Get the MSR setter SC
5700 sc ; Set it
5701 mr r3,r2 ; Restore R3
5702 blr ; Go back all set up...
5703
5704
5705;
5706; Guest shadow assist -- remove all guest mappings
5707;
5708; Remove all mappings for a guest pmap from the shadow hash table.
5709;
5710; Parameters:
5711; r3 : address of pmap, 32-bit kernel virtual address
5712;
5713; Non-volatile register usage:
5714; r24 : host pmap's physical address
5715; r25 : VMM extension block's physical address
5716; r26 : physent address
5717; r27 : guest pmap's space ID number
5718; r28 : current hash table page index
5719; r29 : guest pmap's physical address
5720; r30 : saved msr image
5721; r31 : current mapping
5722;
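;
; A C outline of the walk below (types and helpers are illustrative
; stand-ins, not kernel definitions):
;
;     #include <stdbool.h>
;     #include <stdint.h>
;
;     typedef struct mapping mapping_t;
;     extern mapping_t *first_slot(int hash_page);           /* stand-in */
;     extern bool ours(mapping_t *m, uint32_t space);        /* stand-in */
;     extern void disconnect_and_free(mapping_t *m);         /* stand-in */
;
;     void rem_all_model(int pages, int slots, uint32_t space) {
;         for (int pg = 0; pg < pages; pg++) {       /* each hash page  */
;             mapping_t *m = first_slot(pg);
;             for (int s = 0; s < slots; s++, m++)   /* each group slot */
;                 if (ours(m, space))                /* !free, ID match */
;                     disconnect_and_free(m);        /* drop PTE + map  */
;         }
;     }
;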
5723 .align 5
5724 .globl EXT(hw_rem_all_gv)
5725
5726LEXT(hw_rem_all_gv)
5727
5728#define graStackSize ((31-24+1)*4)+4
5729 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5730 ; Mint a new stack frame
5731 mflr r0 ; Get caller's return address
5732 mfsprg r11,2 ; Get feature flags
5733 mtcrf 0x02,r11 ; Insert feature flags into cr6
5734 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5735 ; Save caller's return address
5736 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5737 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5738 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5739 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5740 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5741 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5742 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5743 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5744
5745 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5746
5747 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5748 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5749 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5750 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5751 b graStart ; Get to it
5752gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5753 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5754 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5755graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5756 xor r29,r3,r9 ; Convert pmap_t virt->real
5757 mr r30,r11 ; Save caller's msr image
5758
5759 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5760 bl sxlkExclusive ; Get lock exclusive
5761
5762 lwz r3,vxsGra(r25) ; Get remove all count
5763 addi r3,r3,1 ; Increment remove all count
5764 stw r3,vxsGra(r25) ; Update remove all count
5765
5766 li r28,0 ; r28 <- first hash page table index to search
5767 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5768graPgLoop:
5769 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5770 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5771 ; Convert page index into page physical index offset
5772 add r31,r31,r11 ; Calculate page physical index entry address
5773 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5774 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5775 b graLoop ; Examine all slots in this page
5776gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5777 b graLoop ; Examine all slots in this page
5778
5779 .align 5
5780graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5781 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5782 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5783 xor r4,r4,r27 ; Compare space ID number
5784 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5785 bne graMiss ; Not one of ours, skip it
5786
5787 lwz r11,vxsGraHits(r25) ; Get remove hit count
5788 addi r11,r11,1 ; Increment remove hit count
5789 stw r11,vxsGraHits(r25) ; Update remove hit count
5790
5791 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5792 bne graRemPhys ; Yes, nothing to disconnect
5793
5794 lwz r11,vxsGraActive(r25) ; Get remove active count
5795 addi r11,r11,1 ; Increment remove active count
5796 stw r11,vxsGraActive(r25) ; Update remove active count
5797
5798 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5799 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5800 ; r31 <- mapping's physical address
5801 ; r3 -> PTE slot physical address
5802 ; r4 -> High-order 32 bits of PTE
5803 ; r5 -> Low-order 32 bits of PTE
5804 ; r6 -> PCA
5805 ; r7 -> PCA physical address
5806 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5807 b graFreePTE ; Join 64-bit path to release the PTE
5808graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5809 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5810graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5811 beq- graRemPhys ; No valid PTE, we're almost done
5812 lis r0,0x8000 ; Prepare free bit for this slot
5813 srw r0,r0,r2 ; Position free bit
5814 or r6,r6,r0 ; Set it in our PCA image
5815 lwz r8,mpPte(r31) ; Get PTE pointer
5816 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5817 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5818 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5819 stw r6,0(r7) ; Update PCA and unlock the PTEG
5820
5821graRemPhys:
5822 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5823 bl mapFindLockPN ; Find 'n' lock this page's physent
5824 mr. r26,r3 ; Got lock on our physent?
5825 beq-- graBadPLock ; No, time to bail out
5826
5827 crset cr1_eq ; cr1_eq <- previous link is the anchor
5828 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5829 la r11,ppLink+4(r26) ; Point to chain anchor
5830 lwz r9,ppLink+4(r26) ; Get chain anchor
5831 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5832
5833graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5834 cmplw r9,r31 ; Is this the mapping to remove?
5835 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5836 bne graRemNext ; No, chain onward
5837 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5838 stw r8,0(r11) ; Unchain gva->phys mapping
5839 b graRemoved ; Exit loop
5840graRemRetry:
5841 lwarx r0,0,r11 ; Get previous link
5842 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5843 stwcx. r0,0,r11 ; Update previous link
5844 bne- graRemRetry ; Lost reservation, retry
5845 b graRemoved ; Good work, let's get outta here
5846
5847graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5848 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5849 mr. r9,r8 ; Does next entry exist?
5850 b graRemLoop ; Carry on
5851
5852graRemove64:
5853 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5854 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5855 la r11,ppLink(r26) ; Point to chain anchor
5856 ld r9,ppLink(r26) ; Get chain anchor
5857 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5858graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5859 cmpld r9,r31 ; Is this the mapping to remove?
5860 ld r8,mpAlias(r9) ; Get forward chain pointer
5861 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5862 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5863 std r8,0(r11) ; Unchain gva->phys mapping
5864 b graRemoved ; Exit loop
5865graRem64Rt: ldarx r0,0,r11 ; Get previous link
5866 and r0,r0,r7 ; Get flags
5867 or r0,r0,r8 ; Insert new forward pointer
5868 stdcx. r0,0,r11 ; Slam it back in
5869 bne-- graRem64Rt ; Lost reservation, retry
5870 b graRemoved ; Good work, let's go home
5871
5872graRem64Nxt:
5873 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5874 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5875 mr. r9,r8 ; Does next entry exist?
5876 b graRem64Lp ; Carry on
5877
5878graRemoved:
5879 mr r3,r26 ; r3 <- physent's address
5880 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5881
5882 lwz r3,mpFlags(r31) ; Get mapping's flags
5883 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5884 ori r3,r3,mpgFree ; Mark mapping free
5885 stw r3,mpFlags(r31) ; Update flags
5886
5887graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5888 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5889 bne graLoop ; No, examine next slot
5890 addi r28,r28,1 ; Increment hash table page index
5891 cmplwi r28,GV_HPAGES ; End of hash table?
5892 bne graPgLoop ; Examine next hash table page
5893
5894 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5895 bl sxlkUnlock ; Release host pmap's search lock
5896
5897 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5898 mtmsr r30 ; Restore 'rupts, translation
5899 isync ; Throw a small wrench into the pipeline
5900 b graPopFrame ; Nothing to do now but pop a frame and return
5901graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5902graPopFrame:
5903 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5904 ; Get caller's return address
5905 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5906 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5907 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5908 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5909 mtlr r0 ; Prepare return address
5910 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5911 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5912 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5913 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5914 lwz r1,0(r1) ; Pop stack frame
5915 blr ; Return to caller
5916
5917graBadPLock:
5918graRemoveMiss:
5919 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5920 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5921 li r3,failMapping ; The BOMB, Dmitri.
5922 sc ; The hydrogen bomb.
5923
5924
5925;
5926; Guest shadow assist -- remove local guest mappings
5927;
5928; Remove local mappings for a guest pmap from the shadow hash table.
5929;
5930; Parameters:
5931; r3 : address of guest pmap, 32-bit kernel virtual address
5932;
5933; Non-volatile register usage:
5934; r20 : current active map word's physical address
5935; r21 : current hash table page address
5936; r22 : updated active map word in process
5937; r23 : active map word in process
5938; r24 : host pmap's physical address
5939; r25 : VMM extension block's physical address
5940; r26 : physent address
5941; r27 : guest pmap's space ID number
5942; r28 : current active map index
5943; r29 : guest pmap's physical address
5944; r30 : saved msr image
5945; r31 : current mapping
5946;
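; A hypothetical sketch of the active-map scan below: each 32-bit word
; carries one bit per mapping slot band, so counting leading zeros
; (cntlzw) finds the next lit bit to visit (names invented):
;
;	for (int w = 0; w < GV_MAP_WORDS; w++) {
;		uint32_t bits = active_map[w];		/* via VMX_ACTMAP_OFFSET */
;		while (bits != 0) {
;			int bit = __builtin_clz(bits);	/* cntlzw */
;			bits &= ~(0x80000000u >> bit);	/* reset lit bit */
;			visit_slot(w, bit);		/* mark dormant, drop PTE */
;		}
;	}
;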
5947 .align 5
5948 .globl EXT(hw_rem_local_gv)
5949
5950LEXT(hw_rem_local_gv)
5951
5952#define grlStackSize ((31-20+1)*4)+4
5953 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5954 ; Mint a new stack frame
5955 mflr r0 ; Get caller's return address
5956 mfsprg r11,2 ; Get feature flags
5957 mtcrf 0x02,r11 ; Insert feature flags into cr6
5958 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5959 ; Save caller's return address
5960 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5961 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5962 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5963 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5964 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5965 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5966 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5967 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5968 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5969 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5970 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5971 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5972
5973 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5974
5975 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5976 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5977 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5978 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5979 b grlStart ; Get to it
5980grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5981 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5982 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5983
5984grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5985 xor r29,r3,r9 ; Convert pmap_t virt->real
5986 mr r30,r11 ; Save caller's msr image
5987
5988 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5989 bl sxlkExclusive ; Get lock exclusive
5990
5991 li r28,0 ; r28 <- index of first active map word to search
5992 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5993 b grlMap1st ; Examine first map word
5994
5995 .align 5
5996grlNextMap: stw r22,0(r21) ; Save updated map word
5997 addi r28,r28,1 ; Increment map word index
5998 cmplwi r28,GV_MAP_WORDS ; See if we're done
5999 beq grlDone ; Yup, let's get outta here
6000
6001grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
6002 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
6003 ; Convert map index into map index offset
6004 add r20,r20,r11 ; Calculate map array element address
6005 lwz r22,0(r20) ; Get active map word at index
6006 mr. r23,r22 ; Any active mappings indicated?
6007 beq grlNextMap ; Nope, check next word
6008
6009 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6010 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6011 ; Extract page index from map word index and convert
6012 ; into page physical index offset
6013 add r21,r21,r11 ; Calculate page physical index entry address
6014 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6015 lwz r21,4(r21) ; Get selected hash table page's address
6016 b grlLoop ; Examine all slots in this page
6017grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6018 b grlLoop ; Examine all slots in this page
6019
6020 .align 5
6021grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6022 cmplwi r11,32 ; Any active mappings left in this word?
6023 lis r12,0x8000 ; Prepare mask to reset bit
6024 srw r12,r12,r11 ; Position mask bit
6025 andc r23,r23,r12 ; Reset lit bit
6026 beq grlNextMap ; No bits lit, examine next map word
6027
6028 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6029 rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6030 ; Extract slot band number from index and insert
6031 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6032
6033 lwz r3,mpFlags(r31) ; Get mapping's flags
6034 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6035 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6036 xor r4,r4,r27 ; Compare space ID number
6037 or. r4,r4,r5 ; (space id miss || global)
6038 bne grlLoop ; Not one of ours, skip it
6039 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6040 ori r3,r3,mpgDormant ; Mark entry dormant
6041 stw r3,mpFlags(r31) ; Update mapping's flags
6042
6043 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6044 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6045 ; r31 <- mapping's physical address
6046 ; r3 -> PTE slot physical address
6047 ; r4 -> High-order 32 bits of PTE
6048 ; r5 -> Low-order 32 bits of PTE
6049 ; r6 -> PCA
6050 ; r7 -> PCA physical address
6051 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6052 b grlFreePTE ; Join 64-bit path to release the PTE
6053grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6054 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6055grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6056 beq- grlLoop ; No valid PTE, we're done with this mapping
6057 lis r0,0x8000 ; Prepare free bit for this slot
6058 srw r0,r0,r2 ; Position free bit
6059 or r6,r6,r0 ; Set it in our PCA image
6060 lwz r8,mpPte(r31) ; Get PTE pointer
6061 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6062 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6063 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6064 stw r6,0(r7) ; Update PCA and unlock the PTEG
6065 b grlLoop ; On to next active mapping in this map word
6066
6067grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6068 bl sxlkUnlock ; Release host pmap's search lock
6069
6070 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6071 mtmsr r30 ; Restore 'rupts, translation
6072 isync ; Throw a small wrench into the pipeline
6073 b grlPopFrame ; Nothing to do now but pop a frame and return
6074grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6075grlPopFrame:
6076 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6077 ; Get caller's return address
6078 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6079 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6080 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6081 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6082 mtlr r0 ; Prepare return address
6083 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6084 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6085 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6086 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6087 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6088 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6089 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6090 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6091 lwz r1,0(r1) ; Pop stack frame
6092 blr ; Return to caller
6093
6094
6095;
6096; Guest shadow assist -- resume a guest mapping
6097;
6098; Locates the specified dormant mapping, and if it exists validates it and makes it
6099; active.
6100;
6101; Parameters:
6102; r3 : address of host pmap, 32-bit kernel virtual address
6103; r4 : address of guest pmap, 32-bit kernel virtual address
6104; r5 : host virtual address, high-order 32 bits
6105; r6 : host virtual address, low-order 32 bits
6106; r7 : guest virtual address, high-order 32 bits
6107; r8 : guest virtual address, low-order 32 bits
6108; r9 : guest mapping protection code
6109;
6110; Non-volatile register usage:
6111; r23 : VMM extension block's physical address
6112; r24 : physent physical address
6113; r25 : caller's msr image from mapSetUp
6114; r26 : guest mapping protection code
6115; r27 : host pmap physical address
6116; r28 : guest pmap physical address
6117; r29 : host virtual address
6118; r30 : guest virtual address
6119; r31 : gva->phys mapping's physical address
6120;
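; A hypothetical sketch of the shadow hash probe used below; the GV_*
; names match the constants in the code, but hash_page_paddr is invented
; and the C shifts stand in for the rlwinm/rlwimi rotates:
;
;	uint32_t hash  = space_id ^ (uint32_t)(gva >> 12);
;	uint32_t idx   = (hash << GV_HPAGE_SHIFT) & GV_HPAGE_MASK;	/* offset into page index */
;	uint64_t group = hash_page_paddr(idx)				/* page holding our group */
;	               | ((hash << GV_HGRP_SHIFT) & GV_HGRP_MASK);	/* group within that page */
;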
6121 .align 5
6122 .globl EXT(hw_res_map_gv)
6123
6124LEXT(hw_res_map_gv)
6125
6126#define grsStackSize ((31-23+1)*4)+4
6127
6128 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6129 ; Mint a new stack frame
6130 mflr r0 ; Get caller's return address
6131 mfsprg r11,2 ; Get feature flags
6132 mtcrf 0x02,r11 ; Insert feature flags into cr6
6133 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6134 ; Save caller's return address
6135 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6136 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6137 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6138 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6139 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6140 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6141 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6142 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6143 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6144
6145 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6146 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6147 mr r26,r9 ; Copy guest mapping protection code
6148
6149 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6150 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6151 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6152 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6153 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6154 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6155 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6156 srwi r11,r30,12 ; Form shadow hash:
6157 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6158 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6159 ; Form index offset from hash page number
6160 add r31,r31,r10 ; r31 <- hash page index entry
6161 lwz r31,4(r31) ; r31 <- hash page paddr
6162 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6163 ; r31 <- hash group paddr
6164 b grsStart ; Get to it
6165
6166grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6167 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6168 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6169 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6170 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6171 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6172 srwi r11,r30,12 ; Form shadow hash:
6173 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6174 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6175 ; Form index offset from hash page number
6176 add r31,r31,r10 ; r31 <- hash page index entry
6177 ld r31,0(r31) ; r31 <- hash page paddr
6178 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6179 ; r31 <- hash group paddr
6180
6181grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6182 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6183 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6184 mr r25,r11 ; Save caller's msr image
6185
6186 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6187 bl sxlkExclusive ; Get lock exclusive
6188
6189 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6190 mtctr r0 ; in this group
6191 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6192
6193 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6194 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6195 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6196 b grs32SrchLp ; Let the search begin!
6197
6198 .align 5
6199grs32SrchLp:
6200 mr r6,r3 ; r6 <- current mapping slot's flags
6201 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6202 mr r7,r4 ; r7 <- current mapping slot's space ID
6203 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6204 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6205 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6206 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6207 xor r7,r7,r9 ; Compare space ID
6208 or r0,r11,r7 ; r0 <- !(!free && space match)
6209 xor r8,r8,r30 ; Compare virtual address
6210 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6211 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6212
6213 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6214 bdnz grs32SrchLp ; Iterate
6215
6216 mr r6,r3 ; r6 <- current mapping slot's flags
6217 clrrwi r5,r5,12 ; Remove flags from virtual address
6218 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6219 xor r4,r4,r9 ; Compare space ID
6220 or r0,r11,r4 ; r0 <- !(!free && space match)
6221 xor r5,r5,r30 ; Compare virtual address
6222 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6223 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6224 b grsSrchMiss ; No joy in our hash group
6225
6226grs64Search:
6227 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6228 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6229 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6230 b grs64SrchLp ; Let the search begin!
6231
6232 .align 5
6233grs64SrchLp:
6234 mr r6,r3 ; r6 <- current mapping slot's flags
6235 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6236 mr r7,r4 ; r7 <- current mapping slot's space ID
6237 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6238 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6239 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6240 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6241 xor r7,r7,r9 ; Compare space ID
6242 or r0,r11,r7 ; r0 <- !(!free && space match)
6243 xor r8,r8,r30 ; Compare virtual address
6244 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6245 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6246
6247 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6248 bdnz grs64SrchLp ; Iterate
6249
6250 mr r6,r3 ; r6 <- current mapping slot's flags
6251 clrrdi r5,r5,12 ; Remove flags from virtual address
6252 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6253 xor r4,r4,r9 ; Compare space ID
6254 or r0,r11,r4 ; r0 <- !(!free && space match)
6255 xor r5,r5,r30 ; Compare virtual address
6256 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6257 bne grsSrchMiss ; No joy in our hash group
6258
6259grsSrchHit:
6260 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6261 bne grsFindHost ; Yes, nothing to disconnect
6262
6263 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6264 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6265 ; r31 <- mapping's physical address
6266 ; r3 -> PTE slot physical address
6267 ; r4 -> High-order 32 bits of PTE
6268 ; r5 -> Low-order 32 bits of PTE
6269 ; r6 -> PCA
6270 ; r7 -> PCA physical address
6271 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6272 b grsFreePTE ; Join 64-bit path to release the PTE
6273grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6274 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6275grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6276 beq- grsFindHost ; No valid PTE, we're almost done
6277 lis r0,0x8000 ; Prepare free bit for this slot
6278 srw r0,r0,r2 ; Position free bit
6279 or r6,r6,r0 ; Set it in our PCA image
6280 lwz r8,mpPte(r31) ; Get PTE pointer
6281 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6282 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6283 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6284 stw r6,0(r7) ; Update PCA and unlock the PTEG
6285
6286grsFindHost:
6287
6288// We now have a dormant guest mapping that matches our space id and virtual address. Our next
6289// step is to locate the host mapping that completes the guest mapping's connection to a physical
6290// frame. The guest and host mappings must connect to the same physical frame, so they must both
6291// be chained on the same physent. We search the physent chain for a host mapping matching our
6292// host's space id and the host virtual address. If we succeed, we know that the entire chain
6293// of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6294// resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6295// host virtual or physical address has changed since the guest mapping was suspended, so it
6296// is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
6297// our caller that it will have to take its long path, translating the host virtual address
6298// through the host's skiplist and installing a new guest mapping.
6299
6300 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6301 bl mapFindLockPN ; Find 'n' lock this page's physent
6302 mr. r24,r3 ; Got lock on our physent?
6303 beq-- grsBadPLock ; No, time to bail out
6304
6305 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6306
6307 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6308 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6309 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6310grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6311 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6312 lwz r7,mpFlags(r12) ; Get mapping's flags
6313 lhz r4,mpSpace(r12) ; Get mapping's space id number
6314 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6315 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6316
6317 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6318 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6319 xori r0,r0,mpNormal ; Normal mapping?
6320 xor r4,r4,r6 ; Compare w/ host space id number
6321 xor r5,r5,r29 ; Compare w/ host virtual address
6322 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6323 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6324 beq grsPEHit ; Hit
6325 b grsPELoop ; Iterate
6326
6327grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6328 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6329 ld r9,ppLink(r24) ; Get first mapping on physent
6330 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6331 andc r9,r9,r0 ; Cleanup mapping pointer
6332grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6333 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6334 lwz r7,mpFlags(r12) ; Get mapping's flags
6335 lhz r4,mpSpace(r12) ; Get mapping's space id number
6336 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6337 ld r9,mpAlias(r12) ; Next mapping in physent alias chain
6338 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6339 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6340 xori r0,r0,mpNormal ; Normal mapping?
6341 xor r4,r4,r6 ; Compare w/ host space id number
6342 xor r5,r5,r29 ; Compare w/ host virtual address
6343 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6344 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6345 beq grsPEHit ; Hit
6346 b grsPELp64 ; Iterate
6347
6348grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6349 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6350 stw r0,mpVAddr+4(r31) ; Write 'em back
6351
6352 eieio ; Ensure previous mapping updates are visible
6353 lwz r0,mpFlags(r31) ; Get flags
6354 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6355 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6356
6357 li r31,mapRtOK ; Indicate success
6358 b grsRelPhy ; Exit through physent lock release
6359
6360grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6361 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6362 la r11,ppLink+4(r24) ; Point to chain anchor
6363 lwz r9,ppLink+4(r24) ; Get chain anchor
6364 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6365grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6366 cmplw r9,r31 ; Is this the mapping to remove?
6367 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6368 bne grsRemNext ; No, chain onward
6369 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6370 stw r8,0(r11) ; Unchain gva->phys mapping
6371 b grsDelete ; Finish deleting mapping
6372grsRemRetry:
6373 lwarx r0,0,r11 ; Get previous link
6374 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6375 stwcx. r0,0,r11 ; Update previous link
6376 bne- grsRemRetry ; Lost reservation, retry
6377 b grsDelete ; Finish deleting mapping
6378
6379 .align 5
6380grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6381 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6382 mr. r9,r8 ; Does next entry exist?
6383 b grsRemLoop ; Carry on
6384
6385grsRemove64:
6386 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6387 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6388 la r11,ppLink(r24) ; Point to chain anchor
6389 ld r9,ppLink(r24) ; Get chain anchor
6390 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6391grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6392 cmpld r9,r31 ; Is this the mapping to remove?
6393 ld r8,mpAlias(r9) ; Get forward chain pointer
6394 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6395 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6396 std r8,0(r11) ; Unchain gva->phys mapping
6397 b grsDelete ; Finish deleting mapping
6398grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6399 and r0,r0,r7 ; Get flags
6400 or r0,r0,r8 ; Insert new forward pointer
6401 stdcx. r0,0,r11 ; Slam it back in
6402 bne-- grsRem64Rt ; Lost reservation, retry
6403 b grsDelete ; Finish deleting mapping
6404
6405 .align 5
6406grsRem64Nxt:
6407 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6408 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6409 mr. r9,r8 ; Does next entry exist?
6410 b grsRem64Lp ; Carry on
6411
6412grsDelete:
6413 lwz r3,mpFlags(r31) ; Get mapping's flags
6414 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6415 ori r3,r3,mpgFree ; Mark mapping free
6416 stw r3,mpFlags(r31) ; Update flags
6417
6418 li r31,mapRtNotFnd ; Didn't succeed
6419
6420grsRelPhy: mr r3,r24 ; r3 <- physent addr
6421 bl mapPhysUnlock ; Unlock physent chain
6422
6423grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6424 bl sxlkUnlock ; Release host pmap search lock
6425
6426grsRtn: mr r3,r31 ; r3 <- result code
6427 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6428 mtmsr r25 ; Restore 'rupts, translation
6429 isync ; Throw a small wrench into the pipeline
6430 b grsPopFrame ; Nothing to do now but pop a frame and return
6431grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6432grsPopFrame:
6433 lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6434 ; Get caller's return address
6435 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6436 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6437 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6438 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6439 mtlr r0 ; Prepare return address
6440 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6441 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6442 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6443 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6444 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6445 lwz r1,0(r1) ; Pop stack frame
6446 blr ; Return to caller
6447
6448 .align 5
6449grsSrchMiss:
6450 li r31,mapRtNotFnd ; Could not locate requested mapping
6451 b grsRelPmap ; Exit through host pmap search lock release
6452
6453grsBadPLock:
6454grsPEMissMiss:
6455 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6456 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6457 li r3,failMapping ; The BOMB, Dmitri.
6458 sc ; The hydrogen bomb.
6459
6460
6461;
6462; Guest shadow assist -- add a guest mapping
6463;
6464; Adds a guest mapping.
6465;
6466; Parameters:
6467; r3 : address of host pmap, 32-bit kernel virtual address
6468; r4 : address of guest pmap, 32-bit kernel virtual address
6469; r5 : guest virtual address, high-order 32 bits
6470; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6471; r7 : new mapping's flags
6472; r8 : physical address, 32-bit page number
6473;
6474; Non-volatile register usage:
6475; r22 : hash group's physical address
6476; r23 : VMM extension block's physical address
6477; r24 : mapping's flags
6478; r25 : caller's msr image from mapSetUp
6479; r26 : physent physical address
6480; r27 : host pmap physical address
6481; r28 : guest pmap physical address
6482; r29 : physical address, 32-bit 4k-page number
6483; r30 : guest virtual address
6484; r31 : gva->phys mapping's physical address
6485;
6486
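; A hypothetical C sketch of the slot-allocation policy below (names
; invented): take a free slot if the group has one, else the first
; dormant slot seen, else steal the slot at the group's cursor:
;
;	slot_t *pick_slot(group_t *g) {
;		slot_t *dormant = NULL;
;		for (int i = 0; i < GV_SLOTS; i++) {
;			slot_t *s = slot_at(g, (g->cursor + i) & GV_SLOT_MASK);
;			if (s->flags & mpgFree)
;				return s;			/* best case */
;			if ((s->flags & mpgDormant) && dormant == NULL)
;				dormant = s;			/* remember first dormant */
;		}
;		return (dormant != NULL) ? dormant : slot_at(g, g->cursor);	/* steal */
;	}
;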
6487 .align 5
6488 .globl EXT(hw_add_map_gv)
6489
6490
6491LEXT(hw_add_map_gv)
6492
6493#define gadStackSize ((31-22+1)*4)+4
6494
6495 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6496 ; Mint a new stack frame
6497 mflr r0 ; Get caller's return address
6498 mfsprg r11,2 ; Get feature flags
6499 mtcrf 0x02,r11 ; Insert feature flags into cr6
6500 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6501 ; Save caller's return address
6502 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6503 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6504 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6505 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6506 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6507 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6508 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6509 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6510 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6511 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6512
6513 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6514 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6515 mr r24,r7 ; Copy guest mapping's flags
6516 mr r29,r8 ; Copy target frame's physical address
6517
6518 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6519 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6520 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6521 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6522 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6523 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6524 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6525 srwi r11,r30,12 ; Form shadow hash:
6526 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6527 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6528 ; Form index offset from hash page number
6529 add r22,r22,r10 ; r22 <- hash page index entry
6530 lwz r22,4(r22) ; r22 <- hash page paddr
6531 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6532 ; r22 <- hash group paddr
6533 b gadStart ; Get to it
6534
6535gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6536 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6537 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6538 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6539 srwi r11,r30,12 ; Form shadow hash:
6540 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6541 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6542 ; Form index offset from hash page number
6543 add r22,r22,r10 ; r22 <- hash page index entry
6544 ld r22,0(r22) ; r22 <- hash page paddr
6545 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6546 ; r22 <- hash group paddr
6547
6548gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6549 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6550 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6551 mr r25,r11 ; Save caller's msr image
6552
6553 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6554 bl sxlkExclusive ; Get lock exclusive
6555
6556 mr r31,r22 ; Prepare to search this group
6557 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6558 mtctr r0 ; in this group
6559 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6560
6561 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6562 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6563 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6564 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6565 b gad32SrchLp ; Let the search begin!
6566
6567 .align 5
6568gad32SrchLp:
6569 mr r6,r3 ; r6 <- current mapping slot's flags
6570 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6571 mr r7,r4 ; r7 <- current mapping slot's space ID
6572 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6573 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6574 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6575 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6576 xor r7,r7,r9 ; Compare space ID
6577 or r0,r11,r7 ; r0 <- !(!free && space match)
6578 xor r8,r8,r12 ; Compare virtual address
6579 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6580 beq gadRelPmap ; Hit, let upper-level redrive sort it out (r31 points to guest mapping)
6581
6582 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6583 bdnz gad32SrchLp ; Iterate
6584
6585 mr r6,r3 ; r6 <- current mapping slot's flags
6586 clrrwi r5,r5,12 ; Remove flags from virtual address
6587 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6588 xor r4,r4,r9 ; Compare space ID
6589 or r0,r11,r4 ; r0 <- !(!free && space match)
6590 xor r5,r5,r12 ; Compare virtual address
6591 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6592 beq gadRelPmap ; Hit, let upper-level redrive sort it out (r31 points to guest mapping)
6593 b gadScan ; No joy in our hash group
6594
6595gad64Search:
6596 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6597 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6598 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6599 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6600 b gad64SrchLp ; Let the search begin!
6601
6602 .align 5
6603gad64SrchLp:
6604 mr r6,r3 ; r6 <- current mapping slot's flags
6605 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6606 mr r7,r4 ; r7 <- current mapping slot's space ID
6607 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6608 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6609 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6610 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6611 xor r7,r7,r9 ; Compare space ID
6612 or r0,r11,r7 ; r0 <- !(!free && space match)
6613 xor r8,r8,r12 ; Compare virtual address
6614 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6615 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6616
6617 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6618 bdnz gad64SrchLp ; Iterate
6619
6620 mr r6,r3 ; r6 <- current mapping slot's flags
6621 clrrdi r5,r5,12 ; Remove flags from virtual address
6622 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6623 xor r4,r4,r9 ; Compare space ID
6624 or r0,r11,r4 ; r0 <- !(!free && space match)
6625 xor r5,r5,r12 ; Compare virtual address
6626 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6627 bne gadScan ; No joy in our hash group
6628 b gadRelPmap ; Hit, let upper-level redrive sort it out
6629
6630gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6631 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6632 ; Prepare to address slot at cursor
6633 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6634 mtctr r0 ; in this group
6635 or r2,r22,r12 ; r2 <- 1st mapping to search
6636 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6637 li r11,0 ; No dormant entries found yet
6638 b gadScanLoop ; Let the search begin!
6639
6640 .align 5
6641gadScanLoop:
6642 addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6643 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6644 ; Trim off any carry, wrapping into slot number range
6645 mr r31,r2 ; r31 <- current mapping's address
6646 or r2,r22,r12 ; r2 <- next mapping to search
6647 mr r6,r3 ; r6 <- current mapping slot's flags
6648 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6649 rlwinm. r0,r6,0,mpgFree ; Test free flag
6650 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6651 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6652 xori r0,r0,mpgDormant ; Invert dormant flag
6653 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6654 bne gadNotDorm ; Not dormant or we've already seen one
6655 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6656gadNotDorm: bdnz gadScanLoop ; Iterate
6657
6658 mr r31,r2 ; r31 <- final mapping's address
6659 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6660 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6661 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6662 xori r0,r0,mpgDormant ; Invert dormant flag
6663 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6664 bne gadCkDormant ; Not dormant or we've already seen one
6665 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6666
6667gadCkDormant:
6668 mr. r31,r11 ; Get dormant mapping, if any, and test
6669 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6670
6671gadSteal:
6672 lbz r12,mpgCursor(r22) ; Get group's cursor
6673 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6674 ; Prepare to address slot at cursor
6675 or r31,r22,r12 ; r31 <- address of mapping to steal
6676
6677 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6678 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6679 ; r31 <- mapping's physical address
6680 ; r3 -> PTE slot physical address
6681 ; r4 -> High-order 32 bits of PTE
6682 ; r5 -> Low-order 32 bits of PTE
6683 ; r6 -> PCA
6684 ; r7 -> PCA physical address
6685 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6686 b gadFreePTE ; Join 64-bit path to release the PTE
6687gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6688 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6689gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6690 beq- gadUpCursor ; No valid PTE, we're almost done
6691 lis r0,0x8000 ; Prepare free bit for this slot
6692 srw r0,r0,r2 ; Position free bit
6693 or r6,r6,r0 ; Set it in our PCA image
6694 lwz r8,mpPte(r31) ; Get PTE pointer
6695 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6696 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6697 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6698 stw r6,0(r7) ; Update PCA and unlock the PTEG
6699
6700gadUpCursor:
6701 rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6702 ; Recover slot number from stolen mapping's address
6703 addi r12,r12,1 ; Increment slot number
6704 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6705 stb r12,mpgCursor(r22) ; Update group's cursor
6706
6707 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6708 bl mapFindLockPN ; Find 'n' lock this page's physent
6709 mr. r26,r3 ; Got lock on our physent?
6710 beq-- gadBadPLock ; No, time to bail out
6711
6712 crset cr1_eq ; cr1_eq <- previous link is the anchor
6713 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6714 la r11,ppLink+4(r26) ; Point to chain anchor
6715 lwz r9,ppLink+4(r26) ; Get chain anchor
6716 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6717gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6718 cmplw r9,r31 ; Is this the mapping to remove?
6719 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6720 bne gadRemNext ; No, chain onward
6721 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6722 stw r8,0(r11) ; Unchain gva->phys mapping
6723 b gadDelDone ; Finish deleting mapping
6724gadRemRetry:
6725 lwarx r0,0,r11 ; Get previous link
6726 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6727 stwcx. r0,0,r11 ; Update previous link
6728 bne- gadRemRetry ; Lost reservation, retry
6729 b gadDelDone ; Finish deleting mapping
6730
6731gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6732 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6733 mr. r9,r8 ; Does next entry exist?
6734 b gadRemLoop ; Carry on
6735
6736gadRemove64:
6737 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6738 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6739 la r11,ppLink(r26) ; Point to chain anchor
6740 ld r9,ppLink(r26) ; Get chain anchor
6741 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6742gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6743 cmpld r9,r31 ; Is this the mapping to remove?
6744 ld r8,mpAlias(r9) ; Get forward chain pointer
6745 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6746 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6747 std r8,0(r11) ; Unchain gva->phys mapping
6748 b gadDelDone ; Finish deleting mapping
6749gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6750 and r0,r0,r7 ; Get flags
6751 or r0,r0,r8 ; Insert new forward pointer
6752 stdcx. r0,0,r11 ; Slam it back in
6753 bne-- gadRem64Rt ; Lost reservation, retry
6754 b gadDelDone ; Finish deleting mapping
6755
6756 .align 5
6757gadRem64Nxt:
6758 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6759 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6760 mr. r9,r8 ; Does next entry exist?
6761 b gadRem64Lp ; Carry on
6762
6763gadDelDone:
6764 mr r3,r26 ; Get physent address
6765 bl mapPhysUnlock ; Unlock physent chain
6766
6767gadFillMap:
6768 lwz r12,pmapSpace(r28) ; Get guest space id number
6769 li r2,0 ; Get a zero
6770 stw r24,mpFlags(r31) ; Set mapping's flags
6771 sth r12,mpSpace(r31) ; Set mapping's space id number
6772 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6773 stw r29,mpPAddr(r31) ; Set mapping's physical address
6774 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6775 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6776 b gadChain ; Continue with chaining mapping to physent
6777gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6778
6779gadChain: mr r3,r29 ; r3 <- physical frame address
6780 bl mapFindLockPN ; Find 'n' lock this page's physent
6781 mr. r26,r3 ; Got lock on our physent?
6782 beq-- gadBadPLock ; No, time to bail out
6783
6784 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6785 lwz r12,ppLink+4(r26) ; Get forward chain
6786 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6787 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6788 stw r11,mpAlias+4(r31) ; New mapping will head chain
6789 stw r12,ppLink+4(r26) ; Point physent to new mapping
6790 b gadFinish ; All over now...
6791
6792gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6793 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6794 ld r12,ppLink(r26) ; Get forward chain
6795 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6796 and r12,r12,r7 ; Isolate pointer's flags
6797 or r12,r12,r31 ; Insert new mapping's address forming pointer
6798 std r11,mpAlias(r31) ; New mapping will head chain
6799 std r12,ppLink(r26) ; Point physent to new mapping
6800
6801gadFinish: eieio ; Ensure new mapping is completely visible
6802
6803gadRelPhy: mr r3,r26 ; r3 <- physent addr
6804 bl mapPhysUnlock ; Unlock physent chain
6805
6806gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6807 bl sxlkUnlock ; Release host pmap search lock
6808
6809 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6810 mtmsr r25 ; Restore 'rupts, translation
6811 isync ; Throw a small wrench into the pipeline
6812 b gadPopFrame ; Nothing to do now but pop a frame and return
6813gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6814gadPopFrame:
6815 lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6816 ; Get caller's return address
6817 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6818 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6819 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6820 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6821 mtlr r0 ; Prepare return address
6822 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6823 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6824 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6825 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6826 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6827 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6828 lwz r1,0(r1) ; Pop stack frame
6829 blr ; Return to caller
6830
6831gadPEMissMiss:
6832gadBadPLock:
6833 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6834 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6835 li r3,failMapping ; The BOMB, Dmitri.
6836 sc ; The hydrogen bomb.
6837
6838
6839;
6840; Guest shadow assist -- suspend a guest mapping
6841;
6842; Suspends a guest mapping.
6843;
6844; Parameters:
6845; r3 : address of host pmap, 32-bit kernel virtual address
6846; r4 : address of guest pmap, 32-bit kernel virtual address
6847; r5 : guest virtual address, high-order 32 bits
6848; r6 : guest virtual address, low-order 32 bits
6849;
6850; Non-volatile register usage:
6851; r26 : VMM extension block's physical address
6852; r27 : host pmap physical address
6853; r28 : guest pmap physical address
6854; r29 : caller's msr image from mapSetUp
6855; r30 : guest virtual address
6856; r31 : gva->phys mapping's physical address
6857;
6858
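; A hypothetical C sketch of the whole routine (names invented):
; suspend = find the live gva->phys mapping, tear down its PTE, and
; mark it dormant so a later hw_res_map_gv can revive it:
;
;	mapping_t *mp = find_gv_mapping(guest_space, gva);
;	if (mp != NULL && !(mp->mpFlags & (mpgFree | mpgDormant))) {
;		invalidate_pte(mp);		/* mapInvPte32/64 + PCA free bit */
;		mp->mpFlags |= mpgDormant;	/* kept for resume, no longer live */
;	}
;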
6859 .align 5
6860 .globl EXT(hw_susp_map_gv)
6861
6862LEXT(hw_susp_map_gv)
6863
6864#define gsuStackSize ((31-26+1)*4)+4
6865
6866 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6867 ; Mint a new stack frame
6868 mflr r0 ; Get caller's return address
6869 mfsprg r11,2 ; Get feature flags
6870 mtcrf 0x02,r11 ; Insert feature flags into cr6
6871 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6872 ; Save caller's return address
6873 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6874 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6875 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6876 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6877 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6878 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6879
6880 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6881
6882 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6883 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6884 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6885
6886 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6887 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6888 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6889 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6890 srwi r11,r30,12 ; Form shadow hash:
6891 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6892 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6893 ; Form index offset from hash page number
6894 add r31,r31,r10 ; r31 <- hash page index entry
6895 lwz r31,4(r31) ; r31 <- hash page paddr
6896 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6897 ; r31 <- hash group paddr
6898 b gsuStart ; Get to it
6899gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6900 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6901 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6902 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6903 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6904 srwi r11,r30,12 ; Form shadow hash:
6905 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6906 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6907 ; Form index offset from hash page number
6908 add r31,r31,r10 ; r31 <- hash page index entry
6909 ld r31,0(r31) ; r31 <- hash page paddr
6910 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6911 ; r31 <- hash group paddr
6912
6913gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6914 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6915 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6916 mr r29,r11 ; Save caller's msr image
6917
6918 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6919 bl sxlkExclusive ; Get lock exclusive
6920
6921 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6922 mtctr r0 ; in this group
6923 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6924
6925 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6926 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6927 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6928 b gsu32SrchLp ; Let the search begin!
6929
6930 .align 5
6931gsu32SrchLp:
6932 mr r6,r3 ; r6 <- current mapping slot's flags
6933 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6934 mr r7,r4 ; r7 <- current mapping slot's space ID
6935 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6936 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6937 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6938 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6939 xor r7,r7,r9 ; Compare space ID
6940 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6941 xor r8,r8,r30 ; Compare virtual address
6942 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6943 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6944
6945 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6946 bdnz gsu32SrchLp ; Iterate
6947
6948 mr r6,r3 ; r6 <- current mapping slot's flags
6949 clrrwi r5,r5,12 ; Remove flags from virtual address
6950 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6951 xor r4,r4,r9 ; Compare space ID
6952 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6953 xor r5,r5,r30 ; Compare virtual address
6954 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6955 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6956 b gsuSrchMiss ; No joy in our hash group
6957
6958gsu64Search:
6959 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6960 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6961 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6962 b gsu64SrchLp ; Let the search begin!
6963
6964 .align 5
6965gsu64SrchLp:
6966 mr r6,r3 ; r6 <- current mapping slot's flags
6967 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6968 mr r7,r4 ; r7 <- current mapping slot's space ID
6969 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6970 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6971 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6972 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6973 xor r7,r7,r9 ; Compare space ID
6974 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6975 xor r8,r8,r30 ; Compare virtual address
6976 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6977 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6978
6979 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6980 bdnz gsu64SrchLp ; Iterate
6981
6982 mr r6,r3 ; r6 <- current mapping slot's flags
6983 clrrdi r5,r5,12 ; Remove flags from virtual address
6984 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6985 xor r4,r4,r9 ; Compare space ID
6986 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6987 xor r5,r5,r30 ; Compare virtual address
6988 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6989 bne gsuSrchMiss ; No joy in our hash group
6990
6991gsuSrchHit:
6992 bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6993 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6994 ; r31 <- mapping's physical address
6995 ; r3 -> PTE slot physical address
6996 ; r4 -> High-order 32 bits of PTE
6997 ; r5 -> Low-order 32 bits of PTE
6998 ; r6 -> PCA
6999 ; r7 -> PCA physical address
7000 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7001 b gsuFreePTE ; Join 64-bit path to release the PTE
7002gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7003 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7004gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
7005 beq- gsuNoPTE ; No valid PTE, we're almost done
7006 lis r0,0x8000 ; Prepare free bit for this slot
7007 srw r0,r0,r2 ; Position free bit
7008 or r6,r6,r0 ; Set it in our PCA image
7009 lwz r8,mpPte(r31) ; Get PTE pointer
7010 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7011 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7012 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7013 stw r6,0(r7) ; Update PCA and unlock the PTEG
7014
7015gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7016 ori r3,r3,mpgDormant ; Mark entry dormant
7017 stw r3,mpFlags(r31) ; Save updated flags
7018 eieio ; Ensure update is visible when we unlock
7019
7020gsuSrchMiss:
7021 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7022 bl sxlkUnlock ; Release host pmap search lock
7023
7024 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7025 mtmsr r29 ; Restore 'rupts, translation
7026 isync ; Throw a small wrench into the pipeline
7027 b gsuPopFrame ; Nothing to do now but pop a frame and return
7028gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7029gsuPopFrame:
7030 lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7031 ; Get caller's return address
7032 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7033 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7034 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7035 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7036 mtlr r0 ; Prepare return address
7037 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7038 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7039 lwz r1,0(r1) ; Pop stack frame
7040 blr ; Return to caller
7041
7042;
7043; Guest shadow assist -- test guest mapping reference and change bits
7044;
7045; Locates the specified guest mapping, and if it exists gathers its reference
7046