2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license agreement.
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
32 #include <db_machine_commands.h>
35 #include <mach_debug.h>
37 #include <ppc/proc_reg.h>
38 #include <ppc/exception.h>
39 #include <ppc/Performance.h>
40 #include <ppc/exception.h>
41 #include <mach/ppc/vm_param.h>
48 ; +--------+--------+--------+--------+--------+--------+--------+--------+
49 ; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
50 ; +--------+--------+--------+--------+--------+--------+--------+--------+
54 ; +--------+--------+--------+
55 ; |//////BB|BBBBBBBB|BBBB////| - SID - base
56 ; +--------+--------+--------+
60 ; +--------+--------+--------+
61 ; |////////|11111111|111111//| - SID - copy 1
62 ; +--------+--------+--------+
66 ; +--------+--------+--------+
67 ; |////////|//222222|22222222| - SID - copy 2
68 ; +--------+--------+--------+
72 ; +--------+--------+--------+
73 ; |//////33|33333333|33//////| - SID - copy 3 - not needed
74 ; +--------+--------+--------+ for a 65-bit VPN
78 ; +--------+--------+--------+--------+--------+--------+--------+
79 ; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
80 ; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed
83 ; +--------+--------+--------+--------+--------+--------+--------+
84 ; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
85 ; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment"
92 ; +--------+--------+--------+--------+--------+--------+--------+
93 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
94 ; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA
96 ; 0 0 1 2 3 4 4 5 6 7 7
97 ; 0 8 6 4 2 0 8 6 4 2 9
98 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
99 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
100 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
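;
; To make the picture above concrete, here is a minimal C sketch of the same
; construction. The helper name (make_vpn), the field widths, and the shift
; amounts are illustrative assumptions for this sketch only; the authoritative
; bit positions are the ones drawn in the diagrams and used by the code below.
;
;   #include <stdint.h>
;
;   static uint64_t make_vpn(uint64_t ea, uint64_t space_id)
;   {
;       // SID hash: OR together the base space ID and its shifted copies
;       // (a third copy is not needed for a 65-bit VPN, as noted above)
;       uint64_t sid_hash = space_id
;                         | (space_id << 14)      // copy 1 (assumed shift)
;                         | (space_id << 28);     // copy 2 (assumed shift)
;
;       uint64_t segment  = ea >> 28;             // high-order "segment" bits of the EA
;       uint64_t page_idx = (ea >> 12) & 0xFFFF;  // page index within the segment
;
;       uint64_t vsid = sid_hash ^ segment;       // VSID = SID hash XOR shifted EA
;       return (vsid << 16) | page_idx;           // VPN = VSID : page index (byte offset not included)
;   }
;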
104 /* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
106 * Maps a page or block into a pmap
108 * Returns 0 if add worked or the vaddr of the first overlap if not
110 * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
112 * 1) bump mapping busy count
114 * 3) find mapping full path - finds all possible list previous elements
115 * 4) upgrade pmap to exclusive
116 * 5) add mapping to search list
122 * 11) drop mapping busy count
125 * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
127 * 1) bump mapping busy count
129 * 3) find mapping full path - finds all possible list previous elements
130 * 4) upgrade pmap to exclusive
131 * 5) add mapping to search list
133 * 7) drop mapping busy count
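;
; The two step lists above amount to the following control flow. This is a
; rough, illustrative C rendering only: the helper names and the lock/search
; interfaces (sxlk_*, map_search_full, map_insert, ...) are stand-ins assumed
; for this sketch, not the kernel's actual C entry points.
;
;   addr64_t hw_add_map_sketch(struct pmap *pmap, struct mapping *mp)
;   {
;       mapping_bump_busy(mp);                    // bump mapping busy count
;       sxlk_lock_shared(&pmap->search_lock);     // lock pmap search list shared
;
;       for (;;) {
;           addr64_t clash = map_search_full(pmap, mp);  // full-path search
;           if (clash != 0) {                     // overlap found: report it
;               sxlk_unlock(&pmap->search_lock);  // (the real code also ORs a
;               mapping_drop_busy(mp);            //  return code into the low bits)
;               return clash;
;           }
;           if (sxlk_promote(&pmap->search_lock) == 0)
;               break;                            // upgraded shared -> exclusive
;           // promotion failed: drop and reacquire exclusive, then rescan,
;           // since the list may have changed while the lock was released
;           sxlk_convert_exclusive(&pmap->search_lock);
;       }
;
;       map_insert(pmap, mp);                     // add mapping to search list
;       sxlk_unlock(&pmap->search_lock);
;       mapping_drop_busy(mp);                    // drop mapping busy count
;       return 0;                                 // 0 means the add worked
;   }
;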
138 .globl EXT(hw_add_map)
142 stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
143 mflr r0 ; Save the link register
144 stw r17,FM_ARG0+0x00(r1) ; Save a register
145 stw r18,FM_ARG0+0x04(r1) ; Save a register
146 stw r19,FM_ARG0+0x08(r1) ; Save a register
147 mfsprg r19,2 ; Get feature flags
148 stw r20,FM_ARG0+0x0C(r1) ; Save a register
149 stw r21,FM_ARG0+0x10(r1) ; Save a register
150 mtcrf 0x02,r19 ; move pf64Bit cr6
151 stw r22,FM_ARG0+0x14(r1) ; Save a register
152 stw r23,FM_ARG0+0x18(r1) ; Save a register
153 stw r24,FM_ARG0+0x1C(r1) ; Save a register
154 stw r25,FM_ARG0+0x20(r1) ; Save a register
155 stw r26,FM_ARG0+0x24(r1) ; Save a register
156 stw r27,FM_ARG0+0x28(r1) ; Save a register
157 stw r28,FM_ARG0+0x2C(r1) ; Save a register
158 stw r29,FM_ARG0+0x30(r1) ; Save a register
159 stw r30,FM_ARG0+0x34(r1) ; Save a register
160 stw r31,FM_ARG0+0x38(r1) ; Save a register
161 stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
164 lwz r11,pmapFlags(r3) ; Get pmap's flags
165 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
166 bne hamPanic ; Call not valid for guest shadow assist pmap
169 rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
170 mr r28,r3 ; Save the pmap
171 mr r31,r4 ; Save the mapping
172 bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
173 lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
174 lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping
178 hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
179 ld r21,mbvrswap(r11) ; Get conversion mask for mapping
181 hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
183 mr r17,r11 ; Save the MSR
184 xor r28,r28,r20 ; Convert the pmap to physical addressing
185 xor r31,r31,r21 ; Convert the mapping to physical addressing
187 la r3,pmapSXlk(r28) ; Point to the pmap search lock
188 bl sxlkShared ; Go get a shared lock on the mapping lists
189 mr. r3,r3 ; Did we get the lock?
190 lwz r24,mpFlags(r31) ; Pick up the flags
191 bne-- hamBadLock ; Nope...
193 li r21,0 ; Remember that we have the shared lock
196 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
197 ; here so that we will know the previous elements so we can dequeue them later.
201 hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
202 lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
203 mr r3,r28 ; Pass in pmap to search
204 lhz r23,mpBSize(r31) ; Get the block size for later
205 mr r29,r4 ; Save top half of vaddr for later
206 mr r30,r5 ; Save bottom half of vaddr for later
208 bl EXT(mapSearchFull) ; Go see if we can find it
210 li r22,lo16(0x800C) ; Get 0xFFFF800C
211 rlwinm r0,r24,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
212 addi r23,r23,1 ; Get actual length
213 rlwnm r22,r22,r0,27,31 ; Rotate to get 12 or 25
214 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
215 slw r9,r23,r22 ; Isolate the low part
216 rlwnm r22,r23,r22,22,31 ; Extract the high order
217 addic r23,r9,-4096 ; Get the length to the last page
218 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
219 addme r22,r22 ; Do high order as well...
220 mr. r3,r3 ; Did we find a mapping here?
221 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
222 bne-- hamOverlay ; We found a mapping, this is no good, can not double map...
224 addc r9,r0,r23 ; Add size to get last page in new range
225 or. r0,r4,r5 ; Are we beyond the end?
226 adde r8,r29,r22 ; Add the rest of the length on
227 rlwinm r9,r9,0,0,31 ; Clean top half of sum
228 beq++ hamFits ; We are at the end...
230 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
231 cmplw r8,r4 ; Is our end before the next (top part)
232 crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
233 cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less
235 bf-- cr0_eq,hamOverlay ; No, we do not fit, there is an overlay...
238 ; Here we try to promote to an exclusive lock. This will fail if someone else has it shared.
241 hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
242 la r3,pmapSXlk(r28) ; Point to the pmap search lock
244 bne-- hamGotX ; We already have the exclusive...
246 bl sxlkPromote ; Try to promote shared to exclusive
247 mr. r3,r3 ; Could we?
248 beq++ hamGotX ; Yeah...
251 ; Since we could not promote our lock, we need to convert to it.
252 ; That means that we drop the shared lock and wait to get it
253 ; exclusive. Since we release the lock, we need to do the look up again.
257 la r3,pmapSXlk(r28) ; Point to the pmap search lock
258 bl sxlkConvert ; Convert shared to exclusive
259 mr. r3,r3 ; Could we?
260 bne-- hamBadLock ; Nope, we must have timed out...
262 li r21,1 ; Remember that we have the exclusive lock
263 b hamRescan ; Go look again...
267 hamGotX: mr r3,r28 ; Get the pmap to insert into
268 mr r4,r31 ; Point to the mapping
269 bl EXT(mapInsert) ; Insert the mapping into the list
271 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
272 lhz r8,mpSpace(r31) ; Get the address space
273 lwz r11,lgpPcfg(r11) ; Get the page config
274 mfsdr1 r7 ; Get the hash table base/bounds
275 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
277 andi. r0,r24,mpType ; Is this a normal mapping?
279 rlwimi r8,r8,14,4,17 ; Double address space
280 rlwinm r9,r30,0,4,31 ; Clear segment
281 rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
282 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
283 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
284 rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
285 addi r4,r4,1 ; Bump up the mapped page count
286 srw r9,r9,r11 ; Isolate just the page index
287 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
288 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
289 xor r9,r9,r10 ; Get the hash to the PTEG
291 bne-- hamDoneNP ; Not a normal mapping, therefore, no physent...
293 bl mapPhysFindLock ; Go find and lock the physent
295 bt++ pf64Bitb,ham64 ; This is 64-bit...
297 lwz r11,ppLink+4(r3) ; Get the alias chain pointer
298 rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
299 slwi r9,r9,6 ; Make PTEG offset
300 ori r7,r7,0xFFC0 ; Stick in the bottom part
301 rlwinm r12,r11,0,~ppFlags ; Clean it up
302 and r9,r9,r7 ; Wrap offset into table
303 mr r4,r31 ; Set the link to install
304 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
305 stw r12,mpAlias+4(r31) ; Move to the mapping
306 bl mapPhyCSet32 ; Install the link
307 b hamDone ; Go finish up...
311 ham64: li r0,ppLFAmask ; Get mask to clean up alias pointer
312 subfic r7,r7,46 ; Get number of leading zeros
313 eqv r4,r4,r4 ; Get all ones
314 ld r11,ppLink(r3) ; Get the alias chain pointer
315 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
316 srd r4,r4,r7 ; Get the wrap mask
317 sldi r9,r9,7 ; Change hash to PTEG offset
318 andc r11,r11,r0 ; Clean out the lock and flags
319 and r9,r9,r4 ; Wrap to PTEG
321 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
322 std r11,mpAlias(r31) ; Set the alias pointer in the mapping
324 bl mapPhyCSet64 ; Install the link
326 hamDone: bl mapPhysUnlock ; Unlock the physent chain
328 hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
329 bl sxlkUnlock ; Unlock the search list
331 mr r3,r31 ; Get the mapping pointer
332 bl mapDropBusy ; Drop the busy count
334 li r3,0 ; Set successful return
335 li r4,0 ; Set successful return
337 hamReturn: bt++ pf64Bitb,hamR64 ; Yes...
339 mtmsr r17 ; Restore enables/translation/etc.
341 b hamReturnC ; Join common...
343 hamR64: mtmsrd r17 ; Restore enables/translation/etc.
346 hamReturnC: lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
347 lwz r17,FM_ARG0+0x00(r1) ; Restore a register
348 lwz r18,FM_ARG0+0x04(r1) ; Restore a register
349 lwz r19,FM_ARG0+0x08(r1) ; Restore a register
350 lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
351 mtlr r0 ; Restore the return
352 lwz r21,FM_ARG0+0x10(r1) ; Restore a register
353 lwz r22,FM_ARG0+0x14(r1) ; Restore a register
354 lwz r23,FM_ARG0+0x18(r1) ; Restore a register
355 lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
356 lwz r25,FM_ARG0+0x20(r1) ; Restore a register
357 lwz r26,FM_ARG0+0x24(r1) ; Restore a register
358 lwz r27,FM_ARG0+0x28(r1) ; Restore a register
359 lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
360 lwz r29,FM_ARG0+0x30(r1) ; Restore a register
361 lwz r30,FM_ARG0+0x34(r1) ; Restore a register
362 lwz r31,FM_ARG0+0x38(r1) ; Restore a register
363 lwz r1,0(r1) ; Pop the stack
370 hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
371 li r0,mpC|mpR ; Get a mask to turn off RC bits
372 lwz r23,mpFlags(r31) ; Get the requested flags
373 lwz r20,mpVAddr(r3) ; Get the overlay address
374 lwz r8,mpVAddr(r31) ; Get the requested address
375 lwz r21,mpVAddr+4(r3) ; Get the overlay address
376 lwz r9,mpVAddr+4(r31) ; Get the requested address
377 lhz r10,mpBSize(r3) ; Get the overlay length
378 lhz r11,mpBSize(r31) ; Get the requested length
379 lwz r24,mpPAddr(r3) ; Get the overlay physical address
380 lwz r25,mpPAddr(r31) ; Get the requested physical address
381 andc r21,r21,r0 ; Clear RC bits
382 andc r9,r9,r0 ; Clear RC bits
384 la r3,pmapSXlk(r28) ; Point to the pmap search lock
385 bl sxlkUnlock ; Unlock the search list
387 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
388 mr r3,r20 ; Save the top of the colliding address
389 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
391 bne++ hamRemv ; Removing, go say so, so we can help...
393 cmplw r20,r8 ; High part of vaddr the same?
394 cmplw cr1,r21,r9 ; Low part?
395 crand cr5_eq,cr0_eq,cr1_eq ; Remember if same
397 cmplw r10,r11 ; Size the same?
398 cmplw cr1,r24,r25 ; Physical address?
399 crand cr5_eq,cr5_eq,cr0_eq ; Remember
400 crand cr5_eq,cr5_eq,cr1_eq ; Remember if same
402 xor r23,r23,r22 ; Compare mapping flag words
403 andi. r23,r23,mpType|mpPerm ; Are mapping types and attributes the same?
404 crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
405 bf-- cr5_eq,hamSmash ; This is not the same, so we return a smash...
407 ori r4,r4,mapRtMapDup ; Set duplicate
408 b hamReturn ; And leave...
410 hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
411 b hamReturn ; Come back yall...
413 hamSmash: ori r4,r4,mapRtSmash ; Tell caller that it has some clean up to do
414 b hamReturn ; Join common epilog code
418 hamBadLock: li r3,0 ; Set lock time out error code
419 li r4,mapRtBadLk ; Set lock time out error code
420 b hamReturn ; Leave....
422 hamPanic: lis r0,hi16(Choke) ; System abend
423 ori r0,r0,lo16(Choke) ; System abend
424 li r3,failMapping ; Show that we failed some kind of mapping thing
431 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
433 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
434 * a 64-bit quantity, it is a long long so it is in R4 and R5.
436 * We return the virtual address of the removed mapping as a
439 * Note that this is designed to be called from 32-bit mode with a stack.
441 * We disable translation and all interruptions here. This keeps us
442 * from having to worry about a deadlock due to having anything locked
443 * and needing it to process a fault.
445 * Note that this must be done with both interruptions off and VM off
447 * Remove mapping via pmap, regular page, no pte
450 * 2) find mapping full path - finds all possible list previous elements
451 * 3) upgrade pmap to exclusive
452 * 4) bump mapping busy count
453 * 5) remove mapping from search list
456 * 8) remove from physent
458 * 10) drop mapping busy count
459 * 11) drain mapping busy count
462 * Remove mapping via pmap, regular page, with pte
465 * 2) find mapping full path - finds all possible list previous elements
466 * 3) upgrade lock to exclusive
467 * 4) bump mapping busy count
469 * 6) invalidate pte and tlbie
470 * 7) atomic merge rc into physent
472 * 9) remove mapping from search list
475 * 12) remove from physent
477 * 14) drop mapping busy count
478 * 15) drain mapping busy count
481 * Remove mapping via pmap, I/O or block
484 * 2) find mapping full path - finds all possible list previous elements
485 * 3) upgrade lock to exclusive
486 * 4) bump mapping busy count
487 * 5) mark remove-in-progress
488 * 6) check and bump remove chunk cursor if needed
490 * 8) if something to invalidate, go to step 11
493 * 10) return with mapRtRemove to force higher level to call again
496 * 12) invalidate ptes, no tlbie
498 * 14) repeat 11 - 13 for all pages in chunk
499 * 15) if not final chunk, go to step 9
500 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
501 * 17) lock pmap share
502 * 18) find mapping full path - finds all possible list previous elements
503 * 19) upgrade lock to exclusive
504 * 20) remove mapping from search list
505 * 21) drop mapping busy count
506 * 22) drain mapping busy count
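;
; For the block/I/O case above, the interesting part is the chunked, re-callable
; removal protocol. Here is an illustrative C sketch of that protocol only; the
; constant values and every helper shown are assumptions made for this sketch
; (mapRtRemove and mapRemChunk mirror the assembly's symbols, not their values).
;
;   enum { mapRemChunk = 256, mapRtRemove = 2, mapRtOK = 0 };    // illustrative values
;
;   int hw_rem_block_sketch(unsigned *cursor, unsigned pages, unsigned tlb_size)
;   {
;       unsigned start = *cursor;                    // current remove-chunk cursor
;       unsigned todo  = pages - start;
;       if (todo > mapRemChunk) todo = mapRemChunk;  // never more than one chunk
;       *cursor = start + mapRemChunk;               // bump cursor for the next call
;
;       // invalidate the autogen PTEs covering [start, start + todo) -- no tlbie yet
;
;       if (start + todo < pages)                    // not the final chunk?
;           return mapRtRemove;                      // force the caller to call again
;
;       // final chunk: tlbie each page, but never more than the whole TLB
;       unsigned n_tlbie = (pages < tlb_size) ? pages : tlb_size;
;       (void)n_tlbie;                               // ... issue n_tlbie tlbie operations
;
;       // then re-lock the pmap, re-find the mapping, dequeue it from the search
;       // list, and drain the busy count, exactly as in steps 17-22 above
;       return mapRtOK;
;   }
;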
511 .globl EXT(hw_rem_map)
516 ; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
517 ; THE HW_PURGE_* ROUTINES ALSO
520 #define hrmStackSize ((31-15+1)*4)+4
521 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
522 mflr r0 ; Save the link register
523 stw r15,FM_ARG0+0x00(r1) ; Save a register
524 stw r16,FM_ARG0+0x04(r1) ; Save a register
525 stw r17,FM_ARG0+0x08(r1) ; Save a register
526 stw r18,FM_ARG0+0x0C(r1) ; Save a register
527 stw r19,FM_ARG0+0x10(r1) ; Save a register
528 mfsprg r19,2 ; Get feature flags
529 stw r20,FM_ARG0+0x14(r1) ; Save a register
530 stw r21,FM_ARG0+0x18(r1) ; Save a register
531 mtcrf 0x02,r19 ; move pf64Bit cr6
532 stw r22,FM_ARG0+0x1C(r1) ; Save a register
533 stw r23,FM_ARG0+0x20(r1) ; Save a register
534 stw r24,FM_ARG0+0x24(r1) ; Save a register
535 stw r25,FM_ARG0+0x28(r1) ; Save a register
536 stw r26,FM_ARG0+0x2C(r1) ; Save a register
537 stw r27,FM_ARG0+0x30(r1) ; Save a register
538 stw r28,FM_ARG0+0x34(r1) ; Save a register
539 stw r29,FM_ARG0+0x38(r1) ; Save a register
540 stw r30,FM_ARG0+0x3C(r1) ; Save a register
541 stw r31,FM_ARG0+0x40(r1) ; Save a register
542 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
543 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
546 lwz r11,pmapFlags(r3) ; Get pmap's flags
547 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
548 bne hrmPanic ; Call not valid for guest shadow assist pmap
551 bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
552 lwz r9,pmapvr+4(r3) ; Get conversion mask
555 hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask
558 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
560 xor r28,r3,r9 ; Convert the pmap to physical addressing
563 ; Here is where we join in from the hw_purge_* routines
566 hrmJoin: lwz r3,pmapFlags(r28) ; Get pmap's flags
567 mfsprg r19,2 ; Get feature flags again (for alternate entries)
569 mr r17,r11 ; Save the MSR
570 mr r29,r4 ; Top half of vaddr
571 mr r30,r5 ; Bottom half of vaddr
573 rlwinm. r3,r3,0,pmapVMgsaa ; Is guest shadow assist active?
574 bne-- hrmGuest ; Yes, handle specially
576 la r3,pmapSXlk(r28) ; Point to the pmap search lock
577 bl sxlkShared ; Go get a shared lock on the mapping lists
578 mr. r3,r3 ; Did we get the lock?
579 bne-- hrmBadLock ; Nope...
582 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
583 ; here so that we will know the previous elements so we can dequeue them
584 ; later. Note: we get back mpFlags in R7.
587 mr r3,r28 ; Pass in pmap to search
588 mr r4,r29 ; High order of address
589 mr r5,r30 ; Low order of address
590 bl EXT(mapSearchFull) ; Go see if we can find it
592 andi. r0,r7,mpPerm ; Mapping marked permanent?
593 crmove cr5_eq,cr0_eq ; Remember permanent marking
594 mr r20,r7 ; Remember mpFlags
595 mr. r31,r3 ; Did we? (And remember mapping address for later)
596 mr r15,r4 ; Save top of next vaddr
597 mr r16,r5 ; Save bottom of next vaddr
598 beq-- hrmNotFound ; Nope, not found...
600 bf-- cr5_eq,hrmPerm ; This one can't be removed...
602 ; Here we try to promote to an exclusive lock. This will fail if someone else has it shared.
606 la r3,pmapSXlk(r28) ; Point to the pmap search lock
607 bl sxlkPromote ; Try to promote shared to exclusive
608 mr. r3,r3 ; Could we?
609 beq++ hrmGotX ; Yeah...
612 ; Since we could not promote our lock, we need to convert to it.
613 ; That means that we drop the shared lock and wait to get it
614 ; exclusive. Since we release the lock, we need to do the look up again.
618 la r3,pmapSXlk(r28) ; Point to the pmap search lock
619 bl sxlkConvert ; Convert shared to exclusive
620 mr. r3,r3 ; Could we?
621 bne-- hrmBadLock ; Nope, we must have timed out...
623 mr r3,r28 ; Pass in pmap to search
624 mr r4,r29 ; High order of address
625 mr r5,r30 ; Low order of address
626 bl EXT(mapSearchFull) ; Rescan the list
628 andi. r0,r7,mpPerm ; Mapping marked permanent?
629 crmove cr5_eq,cr0_eq ; Remember permanent marking
630 mr. r31,r3 ; Did we lose it when we converted?
631 mr r20,r7 ; Remember mpFlags
632 mr r15,r4 ; Save top of next vaddr
633 mr r16,r5 ; Save bottom of next vaddr
634 beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...
636 bf-- cr5_eq,hrmPerm ; This one can't be removed...
639 ; We have an exclusive lock on the mapping chain. And we
640 ; also have the busy count bumped in the mapping so it can not go away on us.
644 hrmGotX: mr r3,r31 ; Get the mapping
645 bl mapBumpBusy ; Bump up the busy count
648 ; Invalidate any PTEs associated with this
649 ; mapping (more than one if a block) and accumulate the reference and change bits.
652 ; Here is also where we need to split 32- and 64-bit processing
655 lwz r21,mpPte(r31) ; Grab the offset to the PTE
656 rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
657 mfsdr1 r29 ; Get the hash table base and size
659 rlwinm r0,r20,0,mpType ; Isolate mapping type
660 cmplwi cr5,r0,mpBlock ; Remember whether this is a block mapping
661 cmplwi r0,mpMinSpecial ; cr0_lt <- not a special mapping type
663 rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
664 ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
665 cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
666 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
667 crorc cr0_eq,cr1_eq,cr0_lt ; No need to look at PTE if none or a special mapping
668 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
669 andc r29,r29,r2 ; Clean up hash table base
670 li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
671 mr r30,r23 ; Move the now merged vaddr to the correct register
672 add r26,r29,r21 ; Point to the PTEG slot
674 bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...
676 rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
677 beq- cr5,hrmBlock32 ; Go treat block specially...
678 subfic r9,r9,-4 ; Get the PCA entry offset
679 bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
680 add r7,r9,r29 ; Point to the PCA slot
682 bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)
684 lwz r21,mpPte(r31) ; Get the quick pointer again
685 lwz r5,0(r26) ; Get the top of PTE
687 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
688 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
689 rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
690 stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
691 beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...
693 stw r5,0(r26) ; Invalidate the PTE
695 li r9,tlbieLock ; Get the TLBIE lock
697 sync ; Make sure the invalid PTE is actually in memory
699 hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
700 mr. r5,r5 ; Is it locked?
701 li r5,1 ; Get locked indicator
702 bne- hrmPtlb32 ; It is locked, go spin...
703 stwcx. r5,0,r9 ; Try to get it
704 bne- hrmPtlb32 ; We was beat...
706 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
708 tlbie r30 ; Invalidate all corresponding TLB entries
710 beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....
712 eieio ; Make sure that the tlbie happens first
713 tlbsync ; Wait for everyone to catch up
714 sync ; Make sure of it all
716 hrmNTlbs: li r0,0 ; Clear this
717 rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
718 stw r0,tlbieLock(0) ; Clear the tlbie lock
719 lis r0,0x8000 ; Get bit for slot 0
720 eieio ; Make sure those RC bits have been stashed in the PTE
722 srw r0,r0,r2 ; Get the allocation hash mask
723 lwz r22,4(r26) ; Get the latest reference and change bits
724 or r6,r6,r0 ; Show that this slot is free
727 eieio ; Make sure all updates come first
728 stw r6,0(r7) ; Unlock the PTEG
731 ; Now, it is time to remove the mapping and unlock the chain.
732 ; But first, we need to make sure no one else is using this
733 ; mapping so we drain the busy now
736 hrmPysDQ32: mr r3,r31 ; Point to the mapping
737 bl mapDrainBusy ; Go wait until mapping is unused
739 mr r3,r28 ; Get the pmap to remove from
740 mr r4,r31 ; Point to the mapping
741 bl EXT(mapRemove) ; Remove the mapping from the list
743 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
744 rlwinm r0,r20,0,mpType ; Isolate mapping type
745 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
746 la r3,pmapSXlk(r28) ; Point to the pmap search lock
747 subi r4,r4,1 ; Drop down the mapped page count
748 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
749 bl sxlkUnlock ; Unlock the search list
751 bf-- cr1_lt,hrmRetn32 ; This one has no real memory associated with it so we are done...
753 bl mapPhysFindLock ; Go find and lock the physent
755 lwz r9,ppLink+4(r3) ; Get first mapping
757 mr r4,r22 ; Get the RC bits we just got
758 bl mapPhysMerge ; Go merge the RC bits
760 rlwinm r9,r9,0,~ppFlags ; Clear the flags from the mapping pointer
762 cmplw r9,r31 ; Are we the first on the list?
763 bne- hrmNot1st ; Nope...
766 lwz r4,mpAlias+4(r31) ; Get our new forward pointer
767 stw r9,mpAlias+4(r31) ; Make sure we are off the chain
768 bl mapPhyCSet32 ; Go set the physent link and preserve flags
770 b hrmPhyDQd ; Join up and unlock it all...
774 hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
775 and r8,r8,r31 ; Get back to a page
776 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
778 la r3,pmapSXlk(r28) ; Point to the pmap search lock
779 bl sxlkUnlock ; Unlock the search list
781 xor r3,r31,r8 ; Flip mapping address to virtual
782 ori r3,r3,mapRtPerm ; Set permanent mapping error
785 hrmBadLock: li r3,mapRtBadLk ; Set bad lock
789 la r3,pmapSXlk(r28) ; Point to the pmap search lock
790 bl sxlkUnlock ; Unlock the search list
793 mr r3,r31 ; Point to the mapping
794 bl mapDropBusy ; Drop the busy here since we need to come back
795 li r3,mapRtRemove ; Say we are still removing this
801 la r3,pmapSXlk(r28) ; Point to the pmap search lock
802 bl sxlkUnlock ; Unlock the search list
803 li r3,mapRtNotFnd ; No mapping found
805 hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)
807 mtmsr r17 ; Restore enables/translation/etc.
809 b hrmRetnCmn ; Join the common return code...
811 hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
813 b hrmRetnCmn ; Join the common return code...
817 hrmNot1st: mr. r8,r9 ; Remember and test current node
818 beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
819 lwz r9,mpAlias+4(r9) ; Chain to the next
820 cmplw r9,r31 ; Is this us?
821 bne- hrmNot1st ; Not us...
823 lwz r9,mpAlias+4(r9) ; Get our forward pointer
824 stw r9,mpAlias+4(r8) ; Unchain us
828 hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain
830 hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
831 mr r3,r31 ; Copy the pointer to the mapping
832 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
833 bl mapDrainBusy ; Go wait until mapping is unused
835 xor r3,r31,r8 ; Flip mapping address to virtual
837 mtmsr r17 ; Restore enables/translation/etc.
840 hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
841 lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
842 lwz r17,FM_ARG0+0x08(r1) ; Restore a register
843 lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
844 mr. r6,r6 ; Should we pass back the "next" vaddr?
845 lwz r19,FM_ARG0+0x10(r1) ; Restore a register
846 lwz r20,FM_ARG0+0x14(r1) ; Restore a register
847 mtlr r0 ; Restore the return
849 rlwinm r16,r16,0,0,19 ; Clean to a page boundary
850 beq hrmNoNextAdr ; Do not pass back the next vaddr...
851 stw r15,0(r6) ; Pass back the top of the next vaddr
852 stw r16,4(r6) ; Pass back the bottom of the next vaddr
855 lwz r15,FM_ARG0+0x00(r1) ; Restore a register
856 lwz r16,FM_ARG0+0x04(r1) ; Restore a register
857 lwz r21,FM_ARG0+0x18(r1) ; Restore a register
858 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
859 lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
860 lwz r23,FM_ARG0+0x20(r1) ; Restore a register
861 lwz r24,FM_ARG0+0x24(r1) ; Restore a register
862 lwz r25,FM_ARG0+0x28(r1) ; Restore a register
863 lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
864 lwz r27,FM_ARG0+0x30(r1) ; Restore a register
865 lwz r28,FM_ARG0+0x34(r1) ; Restore a register
866 lwz r29,FM_ARG0+0x38(r1) ; Restore a register
867 lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
868 lwz r31,FM_ARG0+0x40(r1) ; Restore a register
869 lwz r1,0(r1) ; Pop the stack
873 ; Here is where we come when all is lost. Somehow, we failed a mapping function
874 ; that must work... All hope is gone. Alas, we die.......
877 hrmPanic: lis r0,hi16(Choke) ; System abend
878 ori r0,r0,lo16(Choke) ; System abend
879 li r3,failMapping ; Show that we failed some kind of mapping thing
884 ; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
885 ; in the range. Then, if we did not finish, return a code indicating that we need to
886 ; be called again. Eventually, we will finish and then, we will do a TLBIE for each
887 ; PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
889 ; A potential speed up is that we stop the invalidate loop once we have walked through
890 ; the hash table once. This really is not worth the trouble because we need to have
891 ; mapped 1/2 of physical RAM in an individual block. Way unlikely.
893 ; We should rethink this and see if we think it will be faster to check PTE and
894 ; only invalidate the specific PTE rather than all block map PTEs in the PTEG.
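;
; The "no more than the full TLB" bound mentioned above is computed branch-free
; in the code below (subi/srawi/andc/and/or). An illustrative C equivalent,
; assuming an arithmetic right shift (which is what srawi provides):
;
;   static unsigned bounded_tlbie_count(unsigned last_page,   // 0-based last page index
;                                       unsigned tlb_last)    // e.g. 63 on 32-bit hardware
;   {
;       int sign = (int)(last_page - tlb_last) >> 31;  // 0 if we exceed the TLB, -1 if not
;       unsigned capped = tlb_last  & ~(unsigned)sign; // keep the cap when we exceed it
;       unsigned actual = last_page &  (unsigned)sign; // keep the count when we do not
;       return capped | actual;                        // == min(last_page, tlb_last)
;   }
;
; The loop that follows then issues (result + 1) tlbie operations, one per page.
;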
899 hrmBlock32: lis r29,0xD000 ; Get shift to 32MB bsu
900 rlwinm r24,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
901 lhz r25,mpBSize(r31) ; Get the number of pages in block
902 lhz r23,mpSpace(r31) ; Get the address space hash
903 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
904 rlwnm r29,r29,r24,28,31 ; Rotate to get 0 or 13
905 addi r25,r25,1 ; Account for zero-based counting
906 ori r0,r20,mpRIP ; Turn on the remove in progress flag
907 slw r25,r25,r29 ; Adjust for 32MB if needed
908 mfsdr1 r29 ; Get the hash table base and size
909 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
910 subi r25,r25,1 ; Convert back to zero-based counting
911 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
912 sub r4,r25,r9 ; Get number of pages left
913 cmplw cr1,r9,r25 ; Have we already hit the end?
914 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
915 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
916 rlwinm r26,r29,16,7,15 ; Get the hash table size
917 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
918 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
919 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
920 cmpwi cr7,r2,0 ; Remember if we have finished
921 slwi r0,r9,12 ; Make cursor into page offset
922 or r24,r24,r23 ; Get full hash
923 and r4,r4,r2 ; If more than a chunk, bring this back to 0
924 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
925 add r27,r27,r0 ; Adjust vaddr to start of current chunk
926 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
928 bgt- cr1,hrmEndInSight ; Someone is already doing the last chunk...
930 la r3,pmapSXlk(r28) ; Point to the pmap search lock
931 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
932 bl sxlkUnlock ; Unlock the search list while we are invalidating
934 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
935 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
936 xor r24,r24,r8 ; Get the proper VSID
937 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
938 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
939 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
940 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
941 add r22,r22,r30 ; Get end address (in PTEG units)
943 hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
944 xor r23,r23,r24 ; Hash it
945 and r23,r23,r26 ; Wrap it into the table
946 rlwinm r3,r23,28,4,29 ; Change to PCA offset
947 subfic r3,r3,-4 ; Get the PCA entry offset
948 add r7,r3,r29 ; Point to the PCA slot
949 cmplw cr5,r30,r22 ; Check if we reached the end of the range
950 addi r30,r30,64 ; bump to the next vaddr
952 bl mapLockPteg ; Lock the PTEG
954 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
955 add r5,r23,r29 ; Point to the PTEG
956 li r0,0 ; Set an invalid PTE value
957 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
958 mtcrf 0x80,r4 ; Set CRs to select PTE slots
959 mtcrf 0x40,r4 ; Set CRs to select PTE slots
961 bf 0,hrmSlot0 ; No autogen here
962 stw r0,0x00(r5) ; Invalidate PTE
964 hrmSlot0: bf 1,hrmSlot1 ; No autogen here
965 stw r0,0x08(r5) ; Invalidate PTE
967 hrmSlot1: bf 2,hrmSlot2 ; No autogen here
968 stw r0,0x10(r5) ; Invalidate PTE
970 hrmSlot2: bf 3,hrmSlot3 ; No autogen here
971 stw r0,0x18(r5) ; Invalidate PTE
973 hrmSlot3: bf 4,hrmSlot4 ; No autogen here
974 stw r0,0x20(r5) ; Invalidate PTE
976 hrmSlot4: bf 5,hrmSlot5 ; No autogen here
977 stw r0,0x28(r5) ; Invalidate PTE
979 hrmSlot5: bf 6,hrmSlot6 ; No autogen here
980 stw r0,0x30(r5) ; Invalidate PTE
982 hrmSlot6: bf 7,hrmSlot7 ; No autogen here
983 stw r0,0x38(r5) ; Invalidate PTE
985 hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
986 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
987 andc r6,r6,r0 ; Turn off all the old autogen bits
989 hrmBNone32: eieio ; Make sure all updates come first
991 stw r6,0(r7) ; Unlock and set the PCA
993 bne+ cr5,hrmBInv32 ; Go invalidate the next...
995 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
997 mr r3,r31 ; Copy the pointer to the mapping
998 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1000 sync ; Make sure memory is consistent
1002 subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
1003 li r6,63 ; Assume full invalidate for now
1004 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1005 andc r6,r6,r5 ; Clear max if we have less to do
1006 and r5,r25,r5 ; Clear count if we have more than max
1007 lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
1008 li r7,tlbieLock ; Get the TLBIE lock
1009 or r5,r5,r6 ; Get number of TLBIEs needed
1011 hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
1012 mr. r2,r2 ; Is it locked?
1013 li r2,1 ; Get our lock value
1014 bne- hrmBTLBlck ; It is locked, go wait...
1015 stwcx. r2,0,r7 ; Try to get it
1016 bne- hrmBTLBlck ; We was beat...
1018 hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
1019 tlbie r27 ; Invalidate it everywhere
1020 addi r27,r27,0x1000 ; Up to the next page
1021 bge+ hrmBTLBi ; Make sure we have done it all...
1023 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
1024 li r2,0 ; Lock clear value
1026 sync ; Make sure all is quiet
1027 beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....
1029 eieio ; Make sure that the tlbie happens first
1030 tlbsync ; Wait for everyone to catch up
1031 sync ; Wait for quiet again
1033 hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock
1035 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1036 bl sxlkShared ; Go get a shared lock on the mapping lists
1037 mr. r3,r3 ; Did we get the lock?
1038 bne- hrmPanic ; Nope...
1040 lwz r4,mpVAddr(r31) ; High order of address
1041 lwz r5,mpVAddr+4(r31) ; Low order of address
1042 mr r3,r28 ; Pass in pmap to search
1043 mr r29,r4 ; Save this in case we need it (only if promote fails)
1044 mr r30,r5 ; Save this in case we need it (only if promote fails)
1045 bl EXT(mapSearchFull) ; Go see if we can find it
1047 mr. r3,r3 ; Did we? (And remember mapping address for later)
1048 mr r15,r4 ; Save top of next vaddr
1049 mr r16,r5 ; Save bottom of next vaddr
1050 beq- hrmPanic ; Nope, not found...
1052 cmplw r3,r31 ; Same mapping?
1053 bne- hrmPanic ; Not good...
1055 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1056 bl sxlkPromote ; Try to promote shared to exclusive
1057 mr. r3,r3 ; Could we?
1058 mr r3,r31 ; Restore the mapping pointer
1059 beq+ hrmBDone1 ; Yeah...
1061 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1062 bl sxlkConvert ; Convert shared to exclusive
1063 mr. r3,r3 ; Could we?
1064 bne-- hrmPanic ; Nope, we must have timed out...
1066 mr r3,r28 ; Pass in pmap to search
1067 mr r4,r29 ; High order of address
1068 mr r5,r30 ; Low order of address
1069 bl EXT(mapSearchFull) ; Rescan the list
1071 mr. r3,r3 ; Did we lose it when we converted?
1072 mr r15,r4 ; Save top of next vaddr
1073 mr r16,r5 ; Save bottom of next vaddr
1074 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1076 hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused
1078 mr r3,r28 ; Get the pmap to remove from
1079 mr r4,r31 ; Point to the mapping
1080 bl EXT(mapRemove) ; Remove the mapping from the list
1082 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1083 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1084 subi r4,r4,1 ; Drop down the mapped page count
1085 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1086 bl sxlkUnlock ; Unlock the search list
1088 b hrmRetn32 ; We are all done, get out...
1091 ; Here we handle the 64-bit version of hw_rem_map
1096 hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
1097 beq-- cr5,hrmBlock64 ; Go treat block specially...
1098 subfic r9,r9,-4 ; Get the PCA entry offset
1099 bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
1100 add r7,r9,r29 ; Point to the PCA slot
1102 bl mapLockPteg ; Go lock up the PTEG
1104 lwz r21,mpPte(r31) ; Get the quick pointer again
1105 ld r5,0(r26) ; Get the top of PTE
1107 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
1108 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
1109 sldi r23,r5,16 ; Shift AVPN up to EA format
1110 // **** Need to adjust above shift based on the page size - large pages need to shift a bit more
1111 rldicr r5,r5,0,62 ; Clear the valid bit
1112 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1113 stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
1114 beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...
1116 std r5,0(r26) ; Invalidate the PTE
1118 li r9,tlbieLock ; Get the TLBIE lock
1120 sync ; Make sure the invalid PTE is actually in memory
1122 hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
1123 rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
1124 mr. r5,r5 ; Is it locked?
1125 li r5,1 ; Get locked indicator
1126 bne-- hrmPtlb64w ; It is locked, go spin...
1127 stwcx. r5,0,r9 ; Try to get it
1128 bne-- hrmPtlb64 ; We was beat...
1130 tlbie r23 ; Invalidate all corresponding TLB entries
1132 eieio ; Make sure that the tlbie happens first
1133 tlbsync ; Wait for everyone to catch up
1135 ptesync ; Make sure of it all
1136 li r0,0 ; Clear this
1137 rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
1138 stw r0,tlbieLock(0) ; Clear the tlbie lock
1139 oris r0,r0,0x8000 ; Assume slot 0
1141 srw r0,r0,r2 ; Get slot mask to deallocate
1143 lwz r22,12(r26) ; Get the latest reference and change bits
1144 or r6,r6,r0 ; Make the guy we killed free
1147 eieio ; Make sure all updates come first
1149 stw r6,0(r7) ; Unlock and change the PCA
1151 hrmPysDQ64: mr r3,r31 ; Point to the mapping
1152 bl mapDrainBusy ; Go wait until mapping is unused
1154 mr r3,r28 ; Get the pmap to remove from
1155 mr r4,r31 ; Point to the mapping
1156 bl EXT(mapRemove) ; Remove the mapping from the list
1158 rlwinm r0,r20,0,mpType ; Isolate mapping type
1159 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
1160 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1161 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1162 subi r4,r4,1 ; Drop down the mapped page count
1163 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1164 bl sxlkUnlock ; Unlock the search list
1166 bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done...
1168 bl mapPhysFindLock ; Go find and lock the physent
1170 li r0,ppLFAmask ; Get mask to clean up mapping pointer
1171 ld r9,ppLink(r3) ; Get first mapping
1172 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1173 mr r4,r22 ; Get the RC bits we just got
1175 bl mapPhysMerge ; Go merge the RC bits
1177 andc r9,r9,r0 ; Clean up the mapping pointer
1179 cmpld r9,r31 ; Are we the first on the list?
1180 bne-- hrmNot1st64 ; Nope...
1183 ld r4,mpAlias(r31) ; Get our forward pointer
1185 std r9,mpAlias(r31) ; Make sure we are off the chain
1186 bl mapPhyCSet64 ; Go set the physent link and preserve flags
1188 b hrmPhyDQd64 ; Join up and unlock it all...
1190 hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1191 stwcx. r5,0,r5 ; Clear the pending reservation
1194 hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1195 mr. r5,r5 ; is it locked?
1196 beq++ hrmPtlb64 ; Nope...
1197 b hrmPtlb64x ; Sniff some more...
1202 mr. r8,r9 ; Remember and test current node
1203 beq-- hrmPhyDQd64 ; Could not find our node...
1204 ld r9,mpAlias(r9) ; Chain to the next
1205 cmpld r9,r31 ; Is this us?
1206 bne-- hrmNot1st64 ; Not us...
1208 ld r9,mpAlias(r9) ; Get our forward pointer
1209 std r9,mpAlias(r8) ; Unchain us
1214 bl mapPhysUnlock ; Unlock the physent chain
1216 hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1217 mr r3,r31 ; Copy the pointer to the mapping
1218 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1219 bl mapDrainBusy ; Go wait until mapping is unused
1221 xor r3,r31,r8 ; Flip mapping address to virtual
1223 mtmsrd r17 ; Restore enables/translation/etc.
1226 b hrmRetnCmn ; Join the common return path...
1230 ; Check hrmBlock32 for comments.
1235 hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1236 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
1237 lhz r24,mpSpace(r31) ; Get the address space hash
1238 lhz r25,mpBSize(r31) ; Get the number of pages in block
1239 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
1240 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1241 addi r25,r25,1 ; Account for zero-based counting
1242 ori r0,r20,mpRIP ; Turn on the remove in progress flag
1243 slw r25,r25,r29 ; Adjust for 32MB if needed
1244 mfsdr1 r29 ; Get the hash table base and size
1245 ld r27,mpVAddr(r31) ; Get the base vaddr
1246 subi r25,r25,1 ; Convert back to zero-based counting
1247 rlwinm r5,r29,0,27,31 ; Isolate the size
1248 sub r4,r25,r9 ; Get number of pages left
1249 cmplw cr1,r9,r25 ; Have we already hit the end?
1250 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1251 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1252 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1253 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1254 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1255 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1256 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1257 srdi r27,r27,12 ; Change address into page index
1258 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1259 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1261 bgt-- cr1,hrmEndInSight ; Someone is already doing the last chunk...
1263 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1264 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1265 bl sxlkUnlock ; Unlock the search list while we are invalidating
1267 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1268 eqv r26,r26,r26 ; Get all foxes here
1269 rldimi r24,r24,28,8 ; Make a couple copies up higher
1270 rldicr r29,r29,0,47 ; Isolate just the hash table base
1271 subfic r5,r5,46 ; Get number of leading zeros
1272 srd r26,r26,r5 ; Shift the size bits over
1273 mr r30,r27 ; Get start of chunk to invalidate
1274 rldicr r26,r26,0,56 ; Make length in PTEG units
1275 add r22,r4,r30 ; Get end page number
1277 hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1278 rldicr r0,r0,0,49 ; Clean all but segment portion
1279 rlwinm r2,r30,0,16,31 ; Get the current page index
1280 xor r0,r0,r24 ; Form VSID
1281 xor r8,r2,r0 ; Hash the vaddr
1282 sldi r8,r8,7 ; Make into PTEG offset
1283 and r23,r8,r26 ; Wrap into the hash table
1284 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1285 subfic r3,r3,-4 ; Get the PCA entry offset
1286 add r7,r3,r29 ; Point to the PCA slot
1288 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1290 bl mapLockPteg ; Lock the PTEG
1292 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1293 add r5,r23,r29 ; Point to the PTEG
1294 li r0,0 ; Set an invalid PTE value
1295 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1296 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1297 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1300 bf 0,hrmSlot0s ; No autogen here
1301 std r0,0x00(r5) ; Invalidate PTE
1303 hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1304 std r0,0x10(r5) ; Invalidate PTE
1306 hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1307 std r0,0x20(r5) ; Invalidate PTE
1309 hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1310 std r0,0x30(r5) ; Invalidate PTE
1312 hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1313 std r0,0x40(r5) ; Invalidate PTE
1315 hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1316 std r0,0x50(r5) ; Invalidate PTE
1318 hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1319 std r0,0x60(r5) ; Invalidate PTE
1321 hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1322 std r0,0x70(r5) ; Invalidate PTE
1324 hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
1325 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
1326 andc r6,r6,r0 ; Turn off all the old autogen bits
1328 hrmBNone64: eieio ; Make sure all updates come first
1329 stw r6,0(r7) ; Unlock and set the PCA
1331 addi r30,r30,1 ; bump to the next PTEG
1332 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1334 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1336 mr r3,r31 ; Copy the pointer to the mapping
1337 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1339 sync ; Make sure memory is consistent
1341 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1342 li r6,255 ; Assume full invalidate for now
1343 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1344 andc r6,r6,r5 ; Clear max if we have less to do
1345 and r5,r25,r5 ; Clear count if we have more than max
1346 sldi r24,r24,28 ; Get the full XOR value over to segment position
1347 ld r27,mpVAddr(r31) ; Get the base vaddr
1348 li r7,tlbieLock ; Get the TLBIE lock
1349 or r5,r5,r6 ; Get number of TLBIEs needed
1351 hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1352 mr. r2,r2 ; Is it locked?
1353 li r2,1 ; Get our lock value
1354 bne-- hrmBTLBlcm ; It is locked, go wait...
1355 stwcx. r2,0,r7 ; Try to get it
1356 bne-- hrmBTLBlcl ; We was beat...
1358 hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1359 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1360 addic. r5,r5,-1 ; See if we did them all
1361 xor r2,r2,r24 ; Make the VSID
1362 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1363 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1365 tlbie r2 ; Invalidate it everywhere
1366 addi r27,r27,0x1000 ; Up to the next page
1367 bge++ hrmBTLBj ; Make sure we have done it all...
1369 eieio ; Make sure that the tlbie happens first
1370 tlbsync ; wait for everyone to catch up
1372 li r2,0 ; Lock clear value
1374 ptesync ; Wait for quiet again
1376 stw r2,tlbieLock(0) ; Clear the tlbie lock
1378 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1379 bl sxlkShared ; Go get a shared lock on the mapping lists
1380 mr. r3,r3 ; Did we get the lock?
1381 bne- hrmPanic ; Nope...
1383 lwz r4,mpVAddr(r31) ; High order of address
1384 lwz r5,mpVAddr+4(r31) ; Low order of address
1385 mr r3,r28 ; Pass in pmap to search
1386 mr r29,r4 ; Save this in case we need it (only if promote fails)
1387 mr r30,r5 ; Save this in case we need it (only if promote fails)
1388 bl EXT(mapSearchFull) ; Go see if we can find it
1390 mr. r3,r3 ; Did we? (And remember mapping address for later)
1391 mr r15,r4 ; Save top of next vaddr
1392 mr r16,r5 ; Save bottom of next vaddr
1393 beq- hrmPanic ; Nope, not found...
1395 cmpld r3,r31 ; Same mapping?
1396 bne- hrmPanic ; Not good...
1398 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1399 bl sxlkPromote ; Try to promote shared to exclusive
1400 mr. r3,r3 ; Could we?
1401 mr r3,r31 ; Restore the mapping pointer
1402 beq+ hrmBDone2 ; Yeah...
1404 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1405 bl sxlkConvert ; Convert shared to exclusive
1406 mr. r3,r3 ; Could we?
1407 bne-- hrmPanic ; Nope, we must have timed out...
1409 mr r3,r28 ; Pass in pmap to search
1410 mr r4,r29 ; High order of address
1411 mr r5,r30 ; Low order of address
1412 bl EXT(mapSearchFull) ; Rescan the list
1414 mr. r3,r3 ; Did we lose it when we converted?
1415 mr r15,r4 ; Save top of next vaddr
1416 mr r16,r5 ; Save bottom of next vaddr
1417 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1419 hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1421 mr r3,r28 ; Get the pmap to remove from
1422 mr r4,r31 ; Point to the mapping
1423 bl EXT(mapRemove) ; Remove the mapping from the list
1425 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1426 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1427 subi r4,r4,1 ; Drop down the mapped page count
1428 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1429 bl sxlkUnlock ; Unlock the search list
1431 b hrmRetn64 ; We are all done, get out...
1433 hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1434 stwcx. r2,0,r2 ; Unreserve it
1436 hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1437 mr. r2,r2 ; Is it held?
1438 beq++ hrmBTLBlcl ; Nope...
1439 b hrmBTLBlcn ; Yeah...
1442 ; Guest shadow assist -- mapping remove
1444 ; Method of operation:
1445 ; o Locate the VMM extension block and the host pmap
1446 ; o Obtain the host pmap's search lock exclusively
1447 ; o Locate the requested mapping in the shadow hash table,
1449 ; o If connected, disconnect the PTE and gather R&C to physent
1450 ; o Locate and lock the physent
1451 ; o Remove mapping from physent's chain
1453 ; o Unlock pmap's search lock
1455 ; Non-volatile registers on entry:
1456 ; r17: caller's msr image
1457 ; r19: sprg2 (feature flags)
1458 ; r28: guest pmap's physical address
1459 ; r29: high-order 32 bits of guest virtual address
1460 ; r30: low-order 32 bits of guest virtual address
1462 ; Non-volatile register usage:
1463 ; r26: VMM extension block's physical address
1464 ; r27: host pmap's physical address
1465 ; r28: guest pmap's physical address
1466 ; r29: physent's physical address
1467 ; r30: guest virtual address
1468 ; r31: guest mapping's physical address
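;
; The shadow hash probe performed below can be summarized by this illustrative
; C sketch (32-bit path). mpgFree and GV_SLOTS refer to the file's own symbols;
; the structure layout, the page-mask value, and the helper name are assumptions
; made for the sketch. Selecting the hash page and group from
; spaceID ^ (vaddr >> 12) is omitted here.
;
;   struct gv_slot { unsigned flags; unsigned short space; unsigned vaddr; };
;
;   static struct gv_slot *shadow_probe(struct gv_slot *group,   // one hash group
;                                       unsigned space_id, unsigned vaddr)
;   {
;       for (unsigned i = 0; i < GV_SLOTS; i++) {      // scan every slot in the group
;           struct gv_slot *s = &group[i];
;           if (s->flags & mpgFree)             continue;  // skip free slots
;           if (s->space != space_id)           continue;  // space ID must match
;           if ((s->vaddr & ~0xFFFu) != vaddr)  continue;  // page-aligned vaddr must match
;           return s;                                      // hit
;       }
;       return 0;                                          // miss: bump the miss count
;   }
;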
1472 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1473 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1474 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1475 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1476 b hrmGStart ; Join common code
1478 hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1479 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1480 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1482 hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1483 bl sxlkExclusive ; Get lock exclusive
1485 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1487 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1488 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1489 srwi r11,r30,12 ; Form shadow hash:
1490 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1491 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1492 ; Form index offset from hash page number
1493 add r31,r31,r12 ; r31 <- hash page index entry
1494 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1495 mtctr r0 ; in this group
1496 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1497 lwz r31,4(r31) ; r31 <- hash page paddr
1498 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1499 ; r31 <- hash group paddr
1501 addi r3,r3,1 ; Increment remove request count
1502 stw r3,vxsGrm(r26) ; Update remove request count
1504 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1505 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1506 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1507 b hrmG32SrchLp ; Let the search begin!
1511 mr r6,r3 ; r6 <- current mapping slot's flags
1512 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1513 mr r7,r4 ; r7 <- current mapping slot's space ID
1514 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1515 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1516 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1517 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1518 xor r7,r7,r9 ; Compare space ID
1519 or r0,r11,r7 ; r0 <- !(free && space match)
1520 xor r8,r8,r30 ; Compare virtual address
1521 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1522 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1524 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1525 bdnz hrmG32SrchLp ; Iterate
1527 mr r6,r3 ; r6 <- current mapping slot's flags
1528 clrrwi r5,r5,12 ; Remove flags from virtual address
1529 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1530 xor r4,r4,r9 ; Compare space ID
1531 or r0,r11,r4 ; r0 <- !(free && space match)
1532 xor r5,r5,r30 ; Compare virtual address
1533 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1534 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1535 b hrmGSrchMiss ; No joy in our hash group
1538 ld r31,0(r31) ; r31 <- hash page paddr
1539 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1540 ; r31 <- hash group paddr
1541 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1542 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1543 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1544 b hrmG64SrchLp ; Let the search begin!
1548 mr r6,r3 ; r6 <- current mapping slot's flags
1549 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1550 mr r7,r4 ; r7 <- current mapping slot's space ID
1551 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1552 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1553 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1554 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1555 xor r7,r7,r9 ; Compare space ID
1556 or r0,r11,r7 ; r0 <- !(free && space match)
1557 xor r8,r8,r30 ; Compare virtual address
1558 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1559 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1561 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1562 bdnz hrmG64SrchLp ; Iterate
1564 mr r6,r3 ; r6 <- current mapping slot's flags
1565 clrrdi r5,r5,12 ; Remove flags from virtual address
1566 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1567 xor r4,r4,r9 ; Compare space ID
1568 or r0,r11,r4 ; r0 <- !(free && space match)
1569 xor r5,r5,r30 ; Compare virtual address
1570 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1571 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1573 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1574 li r25,mapRtNotFnd ; Return not found
1575 addi r3,r3,1 ; Increment miss count
1576 stw r3,vxsGrmMiss(r26) ; Update miss count
1577 b hrmGReturn ; Join guest return
1581 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1582 bne hrmGDormant ; Yes, nothing to disconnect
1584 lwz r3,vxsGrmActive(r26) ; Get active hit count
1585 addi r3,r3,1 ; Increment active hit count
1586 stw r3,vxsGrmActive(r26) ; Update hit count
1588 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1589 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1590 ; r31 <- mapping's physical address
1591 ; r3 -> PTE slot physical address
1592 ; r4 -> High-order 32 bits of PTE
1593 ; r5 -> Low-order 32 bits of PTE
1595 ; r7 -> PCA physical address
1596 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1597 b hrmGFreePTE ; Join 64-bit path to release the PTE
1599 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1600 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1602 mr. r3,r3 ; Was there a valid PTE?
1603 beq hrmGDormant ; No valid PTE, we're almost done
1604 lis r0,0x8000 ; Prepare free bit for this slot
1605 srw r0,r0,r2 ; Position free bit
1606 or r6,r6,r0 ; Set it in our PCA image
1607 lwz r8,mpPte(r31) ; Get PTE offset
1608 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1609 stw r8,mpPte(r31) ; Save invalidated PTE offset
1610 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1611 stw r6,0(r7) ; Update PCA and unlock the PTEG
1614 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1615 bl mapFindLockPN ; Find 'n' lock this page's physent
1616 mr. r29,r3 ; Got lock on our physent?
1617 beq-- hrmGBadPLock ; No, time to bail out
1619 crset cr1_eq ; cr1_eq <- previous link is the anchor
1620 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1621 la r11,ppLink+4(r29) ; Point to chain anchor
1622 lwz r9,ppLink+4(r29) ; Get chain anchor
1623 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1625 beq- hrmGPEMissMiss ; End of chain, this is not good
1626 cmplw r9,r31 ; Is this the mapping to remove?
1627 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1628 bne hrmGRemNext ; No, chain onward
1629 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1630 stw r8,0(r11) ; Unchain gpv->phys mapping
1631 b hrmGDelete ; Finish deleting mapping
1633 lwarx r0,0,r11 ; Get previous link
1634 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1635 stwcx. r0,0,r11 ; Update previous link
1636 bne- hrmGRemRetry ; Lost reservation, retry
1637 b hrmGDelete ; Finish deleting mapping
1640 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1641 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1642 mr. r9,r8 ; Does next entry exist?
1643 b hrmGRemLoop ; Carry on
1646 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1647 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1648 la r11,ppLink(r29) ; Point to chain anchor
1649 ld r9,ppLink(r29) ; Get chain anchor
1650 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1652 beq-- hrmGPEMissMiss ; End of chain, this is not good
1653 cmpld r9,r31 ; Is this the mapping to remove?
1654 ld r8,mpAlias(r9) ; Get forward chain pointer
1655 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1656 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1657 std r8,0(r11) ; Unchain gpv->phys mapping
1658 b hrmGDelete ; Finish deleting mapping
1660 ldarx r0,0,r11 ; Get previous link
1661 and r0,r0,r7 ; Get flags
1662 or r0,r0,r8 ; Insert new forward pointer
1663 stdcx. r0,0,r11 ; Slam it back in
1664 bne-- hrmGRem64Rt ; Lost reservation, retry
1665 b hrmGDelete ; Finish deleting mapping
1669 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1670 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1671 mr. r9,r8 ; Does next entry exist?
1672 b hrmGRem64Lp ; Carry on
1675 mr r3,r29 ; r3 <- physent addr
1676 bl mapPhysUnlock ; Unlock physent chain
1677 lwz r3,mpFlags(r31) ; Get mapping's flags
1678 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1679 ori r3,r3,mpgFree ; Mark mapping free
1680 stw r3,mpFlags(r31) ; Update flags
1681 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1684 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1685 bl sxlkUnlock ; Release host pmap search lock
1687 mr r3,r25 ; r3 <- return code
1688 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1689 mtmsr r17 ; Restore 'rupts, translation
1690 isync ; Throw a small wrench into the pipeline
1691 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1692 hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1693 b hrmRetnCmn ; Join common return
1697 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1698 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1699 li r3,failMapping ; All the way from New Orleans
1704 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1706 * Upon entry, R3 contains a pointer to a physent.
1708 * This function removes the first mapping from a physical entry
1709 * alias list. It locks the list, extracts the vaddr and pmap from
1710 * the first entry. It then jumps into the hw_rem_map function.
1711 * NOTE: since we jump into rem_map, we need to set up the stack
1712 * identically. Also, we set the next parm to 0 so we do not
1713 * try to save a next vaddr.
1715 * We return the virtual address of the removed mapping as a
1718 * Note that this is designed to be called from 32-bit mode with a stack.
1720 * We disable translation and all interruptions here. This keeps us
1721 * from having to worry about a deadlock due to having anything locked
1722 * and needing it to process a fault.
1724 * Note that this must be done with both interruptions off and VM off
1727 * Remove mapping via physical page (mapping_purge)
1730 * 2) extract vaddr and pmap
1732 * 4) do "remove mapping via pmap"
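 *
 *	Roughly, in C (an illustrative sketch only: first_alias() stands for the
 *	ppLink flag masking, pmap_from_space() for the pmapTrans lookup, and
 *	remove_mapping() for the shared hw_rem_map tail at hrmJoin; none of these
 *	are actual kernel routines):
 *
 *		addr64_t hw_purge_phys_sketch(struct phys_entry *pp) {
 *			mapPhysLock(pp);
 *			mapping *mp = first_alias(pp);               // ppLink with flag bits masked off
 *			if (mp == 0) {                               // nothing chained from this page
 *				mapPhysUnlock(pp);
 *				return mapRtEmpty;
 *			}
 *			pmap_t *pmap = pmap_from_space(mp->mpSpace); // pmapTrans[space].pmapPAddr
 *			addr64_t va = mp->mpVAddr;                   // vaddr of the victim mapping
 *			mapPhysUnlock(pp);
 *			return remove_mapping(pmap, va);             // i.e., fall into hrmJoin
 *		}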
1738 .globl EXT(hw_purge_phys)
1741 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1742 mflr r0 ; Save the link register
1743 stw r15,FM_ARG0+0x00(r1) ; Save a register
1744 stw r16,FM_ARG0+0x04(r1) ; Save a register
1745 stw r17,FM_ARG0+0x08(r1) ; Save a register
1746 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1747 stw r19,FM_ARG0+0x10(r1) ; Save a register
1748 stw r20,FM_ARG0+0x14(r1) ; Save a register
1749 stw r21,FM_ARG0+0x18(r1) ; Save a register
1750 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1751 stw r23,FM_ARG0+0x20(r1) ; Save a register
1752 stw r24,FM_ARG0+0x24(r1) ; Save a register
1753 stw r25,FM_ARG0+0x28(r1) ; Save a register
1754 li r6,0 ; Set no next address return
1755 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1756 stw r27,FM_ARG0+0x30(r1) ; Save a register
1757 stw r28,FM_ARG0+0x34(r1) ; Save a register
1758 stw r29,FM_ARG0+0x38(r1) ; Save a register
1759 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1760 stw r31,FM_ARG0+0x40(r1) ; Save a register
1761 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1762 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1764 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1766 bl mapPhysLock ; Lock the physent
1768 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1770 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1771 li r0,ppFlags ; Set the bottom stuff to clear
1772 b hppJoin ; Join the common...
1774 hppSF: li r0,ppLFAmask ; Get mask to clean up mapping pointer
1775 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1776 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1778 hppJoin: andc. r12,r12,r0 ; Clean and test link
1779 beq-- hppNone ; There are no more mappings on physical page
1781 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1782 lhz r7,mpSpace(r12) ; Get the address space hash
1783 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
1784 slwi r0,r7,2 ; Multiply space by 4
1785 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1786 slwi r7,r7,3 ; Multiply space by 8
1787 lwz r5,mpVAddr+4(r12) ; and the bottom
1788 add r7,r7,r0 ; Get correct displacement into translate table
1789 lwz r28,0(r28) ; Get the actual translation map
1791 add r28,r28,r7 ; Point to the pmap translation
1793 bl mapPhysUnlock ; Time to unlock the physical entry
1795 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1797 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1798 b hrmJoin ; Go remove the mapping...
1800 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1801 b hrmJoin ; Go remove the mapping...
1805 hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1807 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1809 mtmsr r11 ; Restore enables/translation/etc.
1811 b hppRetnCmn ; Join the common return code...
1813 hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1817 ; NOTE: we have not used any registers other than the volatiles to this point
1820 hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1822 li r3,mapRtEmpty ; Physent chain is empty
1823 mtlr r12 ; Restore the return
1824 lwz r1,0(r1) ; Pop the stack
1828 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1830 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1831 * a 64-bit quantity, it is a long long so it is in R4 and R5.
1833 * We return the virtual address of the removed mapping as a
1836 * Note that this is designed to be called from 32-bit mode with a stack.
1838 * We disable translation and all interruptions here. This keeps us
1839 * from having to worry about a deadlock due to having anything locked
1840 * and needing it to process a fault.
1842 * Note that this must be done with both interruptions off and VM off
1844 * Remove a mapping which can be reestablished by VM
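 *
 *	The search loop below amounts to the following C sketch (illustrative only:
 *	is_removable() summarizes the mpType/mpPerm/busy-count test and
 *	remove_mapping() stands for the shared hw_rem_map tail at hrmJoin; the
 *	mapSearchFull calling convention is approximated):
 *
 *		mp = mapSearchFull(pmap, va, &next);         // full search keeps the back links
 *		for (;;) {
 *			if (mp && is_removable(mp))              // normal, not permanent, busy == 0
 *				return remove_mapping(pmap, mp);     // removed vaddr returned, *nextva set
 *			if (next == 0)
 *				return mapRtNotFnd;                  // ran off the end of the pmap
 *			va = next;                               // skip this one and keep scanning
 *			mp = mapSearchFull(pmap, va, &next);
 *		}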
1849 .globl EXT(hw_purge_map)
1852 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1853 mflr r0 ; Save the link register
1854 stw r15,FM_ARG0+0x00(r1) ; Save a register
1855 stw r16,FM_ARG0+0x04(r1) ; Save a register
1856 stw r17,FM_ARG0+0x08(r1) ; Save a register
1857 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1858 stw r19,FM_ARG0+0x10(r1) ; Save a register
1859 mfsprg r19,2 ; Get feature flags
1860 stw r20,FM_ARG0+0x14(r1) ; Save a register
1861 stw r21,FM_ARG0+0x18(r1) ; Save a register
1862 mtcrf 0x02,r19 ; move pf64Bit cr6
1863 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1864 stw r23,FM_ARG0+0x20(r1) ; Save a register
1865 stw r24,FM_ARG0+0x24(r1) ; Save a register
1866 stw r25,FM_ARG0+0x28(r1) ; Save a register
1867 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1868 stw r27,FM_ARG0+0x30(r1) ; Save a register
1869 stw r28,FM_ARG0+0x34(r1) ; Save a register
1870 stw r29,FM_ARG0+0x38(r1) ; Save a register
1871 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1872 stw r31,FM_ARG0+0x40(r1) ; Save a register
1873 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1874 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1877 lwz r11,pmapFlags(r3) ; Get pmaps flags
1878 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1879 bne hpmPanic ; Call not valid for guest shadow assist pmap
1882 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1883 lwz r9,pmapvr+4(r3) ; Get conversion mask
1886 hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1889 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1891 xor r28,r3,r9 ; Convert the pmap to physical addressing
1893 mr r17,r11 ; Save the MSR
1895 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1896 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1897 mr. r3,r3 ; Did we get the lock?
1898 bne-- hrmBadLock ; Nope...
1900 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
1901 ; here so that we will know the previous elements so we can dequeue them
1905 mr r3,r28 ; Pass in pmap to search
1906 mr r29,r4 ; Top half of vaddr
1907 mr r30,r5 ; Bottom half of vaddr
1908 bl EXT(mapSearchFull) ; Rescan the list
1909 mr. r31,r3 ; Did we? (And remember mapping address for later)
1910 or r0,r4,r5 ; Are we beyond the end?
1911 mr r15,r4 ; Save top of next vaddr
1912 cmplwi cr1,r0,0 ; See if there is another
1913 mr r16,r5 ; Save bottom of next vaddr
1914 bne-- hpmGotOne ; We found one, go check it out...
1916 hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1917 b hrmNotFound ; No more in pmap to check...
1919 hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1920 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
1921 rlwinm r21,r20,8,24,31 ; Extract the busy count
1922 cmplwi cr2,r21,0 ; Is it busy?
1923 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
1924 beq++ hrmGotX ; Found, branch to remove the mapping...
1925 b hpmCNext ; Nope...
1927 hpmPanic: lis r0,hi16(Choke) ; System abend
1928 ori r0,r0,lo16(Choke) ; System abend
1929 li r3,failMapping ; Show that we failed some kind of mapping thing
1933 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1935 * Upon entry, R3 contains a pointer to the physent and
1936 * R4 contains a pointer to the pmap.
1938 * This function removes the first mapping for a specific pmap from a physical entry
1939 * alias list. It locks the list, extracts the vaddr and pmap from
1940 * the first appropriate entry. It then jumps into the hw_rem_map function.
1941 * NOTE: since we jump into rem_map, we need to set up the stack
1942 * identically. Also, we set the next parm to 0 so we do not
1943 * try to save a next vaddr.
1945 * We return the virtual address of the removed mapping as a
1948 * Note that this is designed to be called from 32-bit mode with a stack.
1950 * We disable translation and all interruptions here. This keeps us
1951 * from having to worry about a deadlock due to having anything locked
1952 * and needing it to process a fault.
1954 * Note that this must be done with both interruptions off and VM off
1957 * Remove mapping via physical page (mapping_purge)
1960 * 2) extract vaddr and pmap
1962 * 4) do "remove mapping via pmap"
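 *
 *	In C terms the scan below is roughly (illustrative sketch only:
 *	first_alias()/next_alias() stand for the ppLink/mpAlias handling and
 *	remove_mapping() for the shared hw_rem_map tail at hrmJoin):
 *
 *		mapPhysLock(pp);
 *		space = pmap->pmapSpace;                     // space hash of the caller's pmap
 *		for (mp = first_alias(pp); mp; mp = next_alias(mp)) {
 *			if (mp->mpSpace == space) {              // first mapping owned by this pmap
 *				va = mp->mpVAddr;
 *				mapPhysUnlock(pp);
 *				return remove_mapping(pmap, va);
 *			}
 *		}
 *		mapPhysUnlock(pp);
 *		return mapRtEmpty;                           // nothing for this pmap on the chain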
1968 .globl EXT(hw_purge_space)
1970 LEXT(hw_purge_space)
1971 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1972 mflr r0 ; Save the link register
1973 stw r15,FM_ARG0+0x00(r1) ; Save a register
1974 stw r16,FM_ARG0+0x04(r1) ; Save a register
1975 stw r17,FM_ARG0+0x08(r1) ; Save a register
1976 mfsprg r2,2 ; Get feature flags
1977 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1978 stw r19,FM_ARG0+0x10(r1) ; Save a register
1979 stw r20,FM_ARG0+0x14(r1) ; Save a register
1980 stw r21,FM_ARG0+0x18(r1) ; Save a register
1981 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1982 mtcrf 0x02,r2 ; move pf64Bit cr6
1983 stw r23,FM_ARG0+0x20(r1) ; Save a register
1984 stw r24,FM_ARG0+0x24(r1) ; Save a register
1985 stw r25,FM_ARG0+0x28(r1) ; Save a register
1986 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1987 stw r27,FM_ARG0+0x30(r1) ; Save a register
1988 li r6,0 ; Set no next address return
1989 stw r28,FM_ARG0+0x34(r1) ; Save a register
1990 stw r29,FM_ARG0+0x38(r1) ; Save a register
1991 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1992 stw r31,FM_ARG0+0x40(r1) ; Save a register
1993 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1994 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1997 lwz r11,pmapFlags(r4) ; Get pmaps flags
1998 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1999 bne hpsPanic ; Call not valid for guest shadow assist pmap
2002 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
2004 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
2008 hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2010 hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2012 xor r4,r4,r9 ; Convert the pmap to physical addressing
2014 bl mapPhysLock ; Lock the physent
2016 lwz r8,pmapSpace(r4) ; Get the space hash
2018 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2020 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2022 hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2023 beq hpsNone ; Did not find one...
2025 lhz r10,mpSpace(r12) ; Get the space
2027 cmplw r10,r8 ; Is this one of ours?
2030 lwz r12,mpAlias+4(r12) ; Chain on to the next
2031 b hpsSrc32 ; Check it out...
2035 hpsSF: li r0,ppLFAmask ; Get mask to clean up mapping pointer
2036 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2037 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2039 hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2040 beq hpsNone ; Did not find one...
2042 lhz r10,mpSpace(r12) ; Get the space
2044 cmplw r10,r8 ; Is this one of ours?
2047 ld r12,mpAlias(r12) ; Chain on to the next
2048 b hpsSrc64 ; Check it out...
2052 hpsFnd: mr r28,r4 ; Set the pmap physical address
2053 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2054 lwz r5,mpVAddr+4(r12) ; and the bottom
2056 bl mapPhysUnlock ; Time to unlock the physical entry
2057 b hrmJoin ; Go remove the mapping...
2061 hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2063 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
2065 mtmsr r11 ; Restore enables/translation/etc.
2067 b hpsRetnCmn ; Join the common return code...
2069 hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2073 ; NOTE: we have not used any registers other than the volatiles to this point
2076 hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2078 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
2079 mtlr r12 ; Restore the return
2080 lwz r1,0(r1) ; Pop the stack
2083 hpsPanic: lis r0,hi16(Choke) ; System abend
2084 ori r0,r0,lo16(Choke) ; System abend
2085 li r3,failMapping ; Show that we failed some kind of mapping thing
2089 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2090 * on this physent chain
2092 * Locates the first guest mapping on the physent chain that is associated with the
2093 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2094 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2095 * repeatedly until no additional guest mappings that match our criteria are removed.
2097 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2100 * r3 : physent, 32-bit kernel virtual address
2101 * r4 : host pmap, 32-bit kernel virtual address
2103 * Volatile register usage (for linkage through hrmJoin):
2104 * r4 : high-order 32 bits of guest virtual address
2105 * r5 : low-order 32 bits of guest virtual address
2106 * r11: saved MSR image
2108 * Non-volatile register usage:
2109 * r26: VMM extension block's physical address
2110 * r27: host pmap's physical address
2111 * r28: guest pmap's physical address
2116 .globl EXT(hw_scrub_guest)
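;
;			A C-level sketch of the scan below (illustrative only: pmap_from_space()
;			stands for the pmapTrans lookup and remove_mapping() for the shared
;			hw_rem_map tail at hrmJoin; neither is an actual kernel routine):
;
;				vmm_ext = host_pmap->pmapVmmExtPhys;          // host's VMM extension block
;				mapPhysLock(pp);
;				for (mp = first_alias(pp); mp; mp = next_alias(mp)) {
;					pmap_t *guest = pmap_from_space(mp->mpSpace);
;					if ((mp->mpFlags & mpType) == mpGuest &&  // a guest mapping...
;					    guest->pmapVmmExtPhys == vmm_ext) {   // ...belonging to this host
;						mapPhysUnlock(pp);
;						return remove_mapping(guest, mp->mpVAddr);
;					}
;				}
;				mapPhysUnlock(pp);
;				return mapRtEmpty;
;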
2118 LEXT(hw_scrub_guest)
2119 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2120 mflr r0 ; Save the link register
2121 stw r15,FM_ARG0+0x00(r1) ; Save a register
2122 stw r16,FM_ARG0+0x04(r1) ; Save a register
2123 stw r17,FM_ARG0+0x08(r1) ; Save a register
2124 mfsprg r2,2 ; Get feature flags
2125 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2126 stw r19,FM_ARG0+0x10(r1) ; Save a register
2127 stw r20,FM_ARG0+0x14(r1) ; Save a register
2128 stw r21,FM_ARG0+0x18(r1) ; Save a register
2129 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2130 mtcrf 0x02,r2 ; move pf64Bit cr6
2131 stw r23,FM_ARG0+0x20(r1) ; Save a register
2132 stw r24,FM_ARG0+0x24(r1) ; Save a register
2133 stw r25,FM_ARG0+0x28(r1) ; Save a register
2134 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2135 stw r27,FM_ARG0+0x30(r1) ; Save a register
2136 li r6,0 ; Set no next address return
2137 stw r28,FM_ARG0+0x34(r1) ; Save a register
2138 stw r29,FM_ARG0+0x38(r1) ; Save a register
2139 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2140 stw r31,FM_ARG0+0x40(r1) ; Save a register
2141 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2142 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2144 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2146 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2147 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2148 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2149 b hsgStart ; Get to work
2151 hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2152 ld r9,pmapvr(r4) ; Get 64-bit virt<->real conversion salt
2154 hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2155 xor r27,r4,r9 ; Convert host pmap_t virt->real
2156 bl mapPhysLock ; Lock the physent
2158 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2160 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2161 hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2162 beq hsg32Miss ; Did not find one...
2163 lwz r8,mpFlags(r12) ; Get mapping's flags
2164 lhz r7,mpSpace(r12) ; Get mapping's space id
2165 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2166 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2167 xori r8,r8,mpGuest ; Is it a guest mapping?
2168 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
2169 slwi r9,r7,2 ; Multiply space by 4
2170 lwz r28,0(r28) ; Get the actual translation map
2171 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2172 slwi r7,r7,3 ; Multiply space by 8
2173 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2174 add r7,r7,r9 ; Get correct displacement into translate table
2175 add r28,r28,r7 ; Point to the pmap translation
2176 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2177 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2178 xor r7,r7,r26 ; Is guest associated with specified host?
2179 or. r7,r7,r8 ; Guest mapping && associated with host?
2180 lwz r12,mpAlias+4(r12) ; Chain on to the next
2181 bne hsg32Loop ; Try next mapping on alias chain
2183 hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2184 b hrmJoin ; Join common path for mapping removal
2187 hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2188 mtmsr r11 ; Restore 'rupts, translation
2189 isync ; Throw a small wrench into the pipeline
2190 li r3,mapRtEmpty ; No mappings found matching specified criteria
2191 b hrmRetnCmn ; Exit through common epilog
2194 hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2195 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2196 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2197 hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2198 beq hsg64Miss ; Did not find one...
2199 lwz r8,mpFlags(r12) ; Get mapping's flags
2200 lhz r7,mpSpace(r12) ; Get mapping's space id
2201 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2202 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2203 xori r8,r8,mpGuest ; Is it a guest mapping?
2204 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
2205 slwi r9,r7,2 ; Multiply space by 4
2206 lwz r28,0(r28) ; Get the actual translation map
2207 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2208 slwi r7,r7,3 ; Multiply space by 8
2209 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2210 add r7,r7,r9 ; Get correct displacement into translate table
2211 add r28,r28,r7 ; Point to the pmap translation
2212 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2213 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2214 xor r7,r7,r26 ; Is guest associated with specified host?
2215 or. r7,r7,r8 ; Guest mapping && associated with host?
2216 ld r12,mpAlias(r12) ; Chain on to the next
2217 bne hsg64Loop ; Try next mapping on alias chain
2219 hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2220 b hrmJoin ; Join common path for mapping removal
2223 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
2224 mtmsrd r11 ; Restore 'rupts, translation
2225 li r3,mapRtEmpty ; No mappings found matching specified criteria
2226 b hrmRetnCmn ; Exit through common epilog
2230 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2232 * Upon entry, R3 contains a pointer to a physent.
2233 * space is the space ID from the pmap in question
2235 * We return the virtual address of the found mapping in
2236 * R3. Note that the mapping's busy count is bumped.
2238 * Note that this is designed to be called from 32-bit mode with a stack.
2240 * We disable translation and all interruptions here. This keeps us
2241 * from having to worry about a deadlock due to having anything locked
2242 * and needing it to process a fault.
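 *
 *	Roughly, in C (sketch only: first_alias()/next_alias() and
 *	map_to_kernel_virt() are stand-ins for the ppLink/mpAlias handling and the
 *	mbvrswap conversion done by the code below):
 *
 *		mapPhysLock(pp);
 *		for (mp = first_alias(pp); mp; mp = next_alias(mp)) {
 *			if (mp->mpSpace == space) {
 *				mapBumpBusy(mp);                 // pin it so it cannot go away
 *				mapPhysUnlock(pp);
 *				return map_to_kernel_virt(mp);   // mbvrswap turns the paddr back into a vaddr
 *			}
 *		}
 *		mapPhysUnlock(pp);
 *		return 0;                                // no mapping for that space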
2247 .globl EXT(hw_find_space)
2250 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2251 mflr r0 ; Save the link register
2252 mr r8,r4 ; Remember the space
2253 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2255 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2257 bl mapPhysLock ; Lock the physent
2259 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2261 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2263 hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2264 beq hfsNone ; Did not find one...
2266 lhz r10,mpSpace(r12) ; Get the space
2268 cmplw r10,r8 ; Is this one of ours?
2271 lwz r12,mpAlias+4(r12) ; Chain on to the next
2272 b hfsSrc32 ; Check it out...
2276 hfsSF: li r0,ppLFAmask ; Get mask to clean up mapping pointer
2277 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2278 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2280 hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2281 beq hfsNone ; Did not find one...
2283 lhz r10,mpSpace(r12) ; Get the space
2285 cmplw r10,r8 ; Is this one of ours?
2288 ld r12,mpAlias(r12) ; Chain on to the next
2289 b hfsSrc64 ; Check it out...
2293 hfsFnd: mr r8,r3 ; Save the physent
2294 mr r3,r12 ; Point to the mapping
2295 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2297 mr r3,r8 ; Get back the physical entry
2298 li r7,0xFFF ; Get a page size mask
2299 bl mapPhysUnlock ; Time to unlock the physical entry
2301 andc r3,r12,r7 ; Move the mapping back down to a page
2302 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2303 xor r12,r3,r12 ; Convert to virtual
2304 b hfsRet ; Time to return
2308 hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2310 hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
2312 mtmsr r11 ; Restore enables/translation/etc.
2314 b hfsRetnCmn ; Join the common return code...
2316 hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2320 ; NOTE: we have not used any registers other than the volatiles to this point
2323 hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
2326 mr. r3,r3 ; Anything to return?
2327 beq hfsRetnNull ; Nope
2328 lwz r11,mpFlags(r3) ; Get mapping flags
2329 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2330 cmplwi r0,mpGuest ; Shadow guest mapping?
2331 beq hfsPanic ; Yup, kick the bucket
2335 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2337 mtlr r12 ; Restore the return
2338 lwz r1,0(r1) ; Pop the stack
2341 hfsPanic: lis r0,hi16(Choke) ; System abend
2342 ori r0,r0,lo16(Choke) ; System abend
2343 li r3,failMapping ; Show that we failed some kind of mapping thing
2347 ; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2348 ; Returns 0 if not found or the virtual address of the mapping if
2349 ; it is. Also, the mapping has its busy count bumped.
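;
;			The equivalent C sketch (illustrative only: map_to_kernel_virt() stands
;			for the mbvrswap conversion and being_removed() for the mpRIP test on
;			the flags that mapSearch hands back):
;
;				if (sxlkShared(&pmap->pmapSXlk) != 0)
;					return 1;                            // lock timed out
;				mp = mapSearch(pmap, va, &next);
;				if (mp == 0 || being_removed(mp))
;					mp = 0;                              // in-flight removals count as not found
;				else
;					mapBumpBusy(mp);                     // caller gets it with busy bumped
;				sxlkUnlock(&pmap->pmapSXlk);
;				*nextva = next;
;				return mp ? map_to_kernel_virt(mp) : 0;
;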
2352 .globl EXT(hw_find_map)
2355 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2356 mflr r0 ; Save the link register
2357 stw r25,FM_ARG0+0x00(r1) ; Save a register
2358 stw r26,FM_ARG0+0x04(r1) ; Save a register
2359 mr r25,r6 ; Remember address of next va
2360 stw r27,FM_ARG0+0x08(r1) ; Save a register
2361 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2362 stw r29,FM_ARG0+0x10(r1) ; Save a register
2363 stw r30,FM_ARG0+0x14(r1) ; Save a register
2364 stw r31,FM_ARG0+0x18(r1) ; Save a register
2365 stw r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2368 lwz r11,pmapFlags(r3) ; Get pmaps flags
2369 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2370 bne hfmPanic ; Call not valid for guest shadow assist pmap
2373 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2374 lwz r7,pmapvr+4(r3) ; Get the second part
2377 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2379 mr r27,r11 ; Remember the old MSR
2380 mr r26,r12 ; Remember the feature bits
2382 xor r28,r3,r7 ; Change the common 32- and 64-bit half
2384 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
2386 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2388 hfmSF1: mr r29,r4 ; Save top half of vaddr
2389 mr r30,r5 ; Save the bottom half
2391 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2392 bl sxlkShared ; Go get a shared lock on the mapping lists
2393 mr. r3,r3 ; Did we get the lock?
2394 bne-- hfmBadLock ; Nope...
2396 mr r3,r28 ; get the pmap address
2397 mr r4,r29 ; Get bits 0:31 to look for
2398 mr r5,r30 ; Get bits 32:64
2400 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
2402 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2403 mr. r31,r3 ; Save the mapping if we found it
2404 cmplwi cr1,r0,0 ; Are we removing?
2405 mr r29,r4 ; Save next va high half
2406 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2407 mr r30,r5 ; Save next va low half
2408 li r6,0 ; Assume we did not find it
2409 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2411 bt-- cr0_eq,hfmNotFnd ; We did not find it...
2413 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2415 andc r4,r31,r26 ; Get back to the mapping page start
2417 ; Note: we can treat 32- and 64-bit the same here. Because we are going from
2418 ; physical to virtual and we only do 32-bit virtual, we only need the low order
2421 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2422 li r6,-1 ; Indicate we found it and it is not being removed
2423 xor r31,r31,r4 ; Flip to virtual
2425 hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2426 bl sxlkUnlock ; Unlock the search list
2428 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2429 and r3,r3,r6 ; Clear if not found or removing
2431 hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
2433 mtmsr r27 ; Restore enables/translation/etc.
2435 b hfmReturnC ; Join common...
2437 hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2440 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2441 stw r30,4(r25) ; Save the bottom of the next va
2442 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2443 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2444 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2445 and r3,r3,r6 ; Clear return if the mapping is being removed
2446 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2447 mtlr r0 ; Restore the return
2448 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2449 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2450 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2451 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2452 lwz r1,0(r1) ; Pop the stack
2457 hfmBadLock: li r3,1 ; Set lock time out error code
2458 b hfmReturn ; Leave....
2460 hfmPanic: lis r0,hi16(Choke) ; System abend
2461 ori r0,r0,lo16(Choke) ; System abend
2462 li r3,failMapping ; Show that we failed some kind of mapping thing
2467 * void hw_clear_maps(void)
2469 * Remove all mappings for all phys entries.
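 *
 *	In outline (C sketch only; the loop bounds are simplified and
 *	first_alias()/next_alias() stand for the ppLink/mpAlias handling):
 *
 *		for (rgn = pmap_mem_regions; rgn->mrPhysTab != 0; rgn++) {
 *			struct phys_entry *pp = rgn->mrPhysTab;
 *			for (n = rgn->mrEnd - rgn->mrStart; n-- != 0; pp++) {
 *				for (mp = first_alias(pp); mp; mp = next_alias(mp))
 *					mp->mpPte &= ~mpHValid;      // forget the cached PTE offset
 *			}
 *		}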
2475 .globl EXT(hw_clear_maps)
2478 mflr r10 ; Save the link register
2479 mfcr r9 ; Save the condition register
2480 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2482 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2483 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2486 lwz r3,mrPhysTab(r5) ; Get the actual table address
2487 lwz r0,mrStart(r5) ; Get start of table entry
2488 lwz r4,mrEnd(r5) ; Get end of table entry
2489 addi r5,r5,mrSize ; Point to the next regions
2491 cmplwi r3,0 ; No more regions?
2492 beq-- hcmDone ; Leave...
2494 sub r4,r4,r0 ; Calculate physical entry count
2498 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2502 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2503 addi r3,r3,physEntrySize ; Next phys_entry
2506 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
2507 beq hcmNoMap32 ; Did not find one...
2509 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2510 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2511 stw r0,mpPte(r4) ; Save the invalidated PTE offset
2513 lwz r4,mpAlias+4(r4) ; Chain on to the next
2514 b hcmNextMap32 ; Check it out...
2522 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2523 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2524 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2525 addi r3,r3,physEntrySize ; Next phys_entry
2528 andc. r4,r4,r0 ; Clean and test mapping address
2529 beq hcmNoMap64 ; Did not find one...
2531 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2532 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2533 stw r0,mpPte(r4) ; Save the invalidated PTE offset
2535 ld r4,mpAlias(r4) ; Chain on to the next
2536 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2537 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2538 b hcmNextMap64 ; Check it out...
2546 mtlr r10 ; Restore the return
2547 mtcr r9 ; Restore the condition register
2548 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2550 mtmsr r11 ; Restore translation/mode/etc.
2555 mtmsrd r11 ; Restore translation/mode/etc.
2562 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
2563 * walks all mappings for a physical page and performs
2564 * specified operations on each.
2566 * pp is unlocked physent
2567 * preop is operation to perform on physent before walk. This would be
2568 * used to set cache attribute or protection
2569 * op is the operation to perform on each mapping during walk
2570 * postop is the operation to perform on the physent after the walk. This would be
2571 * used to set or reset the RC bits.
2572 * opmod modifies the action taken on any connected PTEs visited during
2575 * We return the RC bits from before postop is run.
2577 * Note that this is designed to be called from 32-bit mode with a stack.
2579 * We disable translation and all interruptions here. This keeps us
2580 * from having to worry about a deadlock due to having anything locked
2581 * and needing it to process a fault.
2583 * We lock the physent, execute preop, and then walk each mapping in turn.
2584 * If there is a PTE, it is invalidated and the RC merged into the physent.
2585 * Then we call the op function.
2586 * Then we revalidate the PTE.
2587 * Once all mappings are finished, we save the physent RC and call the
2588 * postop routine. Then we unlock the physent and return the RC.
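 *
 *	The control flow is easier to see in C (sketch only: hwp_table[], mapInvPte(),
 *	mapMergeRC(), revalidate() and low_word() are stand-ins for the hwpOpBase
 *	dispatch and the separate 32-/64-bit PTE handling below):
 *
 *		unsigned int hw_walk_phys_sketch(struct phys_entry *pp, int preop, int op,
 *		                                 int postop, unsigned int parm, int opmod) {
 *			mapPhysLock(pp);
 *			int bail = !hwp_table[preop](pp, parm);           // preop may say "bail now"
 *			for (mp = first_alias(pp); !bail && mp; mp = next_alias(mp)) {
 *				pte = 0;
 *				if (opmod < hwpMergePTE)       pte = mapInvPte(mp);  // invalidate, merge RC
 *				else if (opmod == hwpMergePTE) mapMergeRC(mp);       // merge RC only
 *				bail = !hwp_table[op](mp, parm);              // op may also say "bail now"
 *				if (pte) revalidate(pte);                     // put the PTE back
 *			}
 *			unsigned int rc = low_word(pp->ppLink);           // RC bits before postop runs
 *			if (!bail) hwp_table[postop](pp, parm);           // an early bail skips postop
 *			mapPhysUnlock(pp);
 *			return rc;
 *		}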
2594 .globl EXT(hw_walk_phys)
2597 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2598 mflr r0 ; Save the link register
2599 stw r24,FM_ARG0+0x00(r1) ; Save a register
2600 stw r25,FM_ARG0+0x04(r1) ; Save a register
2601 stw r26,FM_ARG0+0x08(r1) ; Save a register
2602 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2603 mr r24,r8 ; Save the parm
2604 mr r25,r7 ; Save the parm
2605 stw r28,FM_ARG0+0x10(r1) ; Save a register
2606 stw r29,FM_ARG0+0x14(r1) ; Save a register
2607 stw r30,FM_ARG0+0x18(r1) ; Save a register
2608 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2609 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2611 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2613 mfsprg r26,0 ; (INSTRUMENTATION)
2614 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2615 addi r27,r27,1 ; (INSTRUMENTATION)
2616 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2617 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2618 slwi r12,r24,2 ; (INSTRUMENTATION)
2619 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2620 addi r27,r27,1 ; (INSTRUMENTATION)
2621 stwx r27,r26,r12 ; (INSTRUMENTATION)
2623 mr r26,r11 ; Save the old MSR
2624 lis r27,hi16(hwpOpBase) ; Get high order of op base
2625 slwi r4,r4,7 ; Convert preop to displacement
2626 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2627 slwi r5,r5,7 ; Convert op to displacement
2628 add r12,r4,r27 ; Point to the preop routine
2629 slwi r28,r6,7 ; Convert postop to displacement
2630 mtctr r12 ; Set preop routine
2631 add r28,r28,r27 ; Get the address of the postop routine
2632 add r27,r5,r27 ; Get the address of the op routine
2634 bl mapPhysLock ; Lock the physent
2636 mr r29,r3 ; Save the physent address
2638 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2640 bctrl ; Call preop routine
2641 bne- hwpEarly32 ; preop says to bail now...
2643 cmplwi r24,hwpMergePTE ; Classify operation modifier
2644 mtctr r27 ; Set up the op function address
2645 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2646 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2647 beq hwpMSrc32 ; Do TLB merge for each mapping
2649 hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2650 beq hwpNone32 ; Did not find one...
2652 bctrl ; Call the op function
2654 bne- hwpEarly32 ; op says to bail now...
2655 lwz r31,mpAlias+4(r31) ; Chain on to the next
2656 b hwpQSrc32 ; Check it out...
2659 hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2660 beq hwpNone32 ; Did not find one...
2662 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2663 bctrl ; Call the op function
2665 bne- hwpEarly32 ; op says to bail now...
2666 lwz r31,mpAlias+4(r31) ; Chain on to the next
2667 b hwpMSrc32 ; Check it out...
2670 hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2671 beq hwpNone32 ; Did not find one...
2674 ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2675 ; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2676 ; If there is no PTE, PTE low is obtained from mapping
2678 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2680 bctrl ; Call the op function
2682 crmove cr1_eq,cr0_eq ; Save the return code
2684 mr. r3,r3 ; Was there a previously valid PTE?
2685 beq- hwpNxt32 ; Nope...
2687 stw r5,4(r3) ; Store second half of PTE
2688 eieio ; Make sure we do not reorder
2689 stw r4,0(r3) ; Revalidate the PTE
2691 eieio ; Make sure all updates come first
2692 stw r6,0(r7) ; Unlock the PCA
2694 hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2695 lwz r31,mpAlias+4(r31) ; Chain on to the next
2696 b hwpSrc32 ; Check it out...
2700 hwpNone32: mtctr r28 ; Get the post routine address
2702 lwz r30,ppLink+4(r29) ; Save the old RC
2703 mr r3,r29 ; Get the physent address
2704 bctrl ; Call post routine
2706 bl mapPhysUnlock ; Unlock the physent
2708 mtmsr r26 ; Restore translation/mode/etc.
2711 b hwpReturn ; Go restore registers and return...
2715 hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2716 mr r3,r29 ; Get the physent address
2717 bl mapPhysUnlock ; Unlock the physent
2719 mtmsr r26 ; Restore translation/mode/etc.
2722 b hwpReturn ; Go restore registers and return...
2726 hwp64: bctrl ; Call preop routine
2727 bne-- hwpEarly64 ; preop says to bail now...
2729 cmplwi r24,hwpMergePTE ; Classify operation modifier
2730 mtctr r27 ; Set up the op function address
2733 ld r31,ppLink(r3) ; Get the pointer to the first mapping
2734 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2735 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2736 beq hwpMSrc64 ; Do TLB merge for each mapping
2738 hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2739 beq hwpNone64 ; Did not find one...
2741 bctrl ; Call the op function
2743 bne-- hwpEarly64 ; op says to bail now...
2744 ld r31,mpAlias(r31) ; Chain on to the next
2745 b hwpQSrc64 ; Check it out...
2748 hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2749 beq hwpNone64 ; Did not find one...
2751 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2752 bctrl ; Call the op function
2754 bne-- hwpEarly64 ; op says to bail now...
2755 ld r31,mpAlias(r31) ; Chain on to the next
2756 b hwpMSrc64 ; Check it out...
2759 hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2760 beq hwpNone64 ; Did not find one...
2762 ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2763 ; PTE low in R5. PTEG comes back locked if there is one
2765 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2767 bctrl ; Call the op function
2769 crmove cr1_eq,cr0_eq ; Save the return code
2771 mr. r3,r3 ; Was there a previously valid PTE?
2772 beq-- hwpNxt64 ; Nope...
2774 std r5,8(r3) ; Save bottom of PTE
2775 eieio ; Make sure we do not reorder
2776 std r4,0(r3) ; Revalidate the PTE
2778 eieio ; Make sure all updates come first
2779 stw r6,0(r7) ; Unlock the PCA
2781 hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2782 ld r31,mpAlias(r31) ; Chain on to the next
2783 b hwpSrc64 ; Check it out...
2787 hwpNone64: mtctr r28 ; Get the post routine address
2789 lwz r30,ppLink+4(r29) ; Save the old RC
2790 mr r3,r29 ; Get the physent address
2791 bctrl ; Call post routine
2793 bl mapPhysUnlock ; Unlock the physent
2795 mtmsrd r26 ; Restore translation/mode/etc.
2797 b hwpReturn ; Go restore registers and return...
2801 hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2802 mr r3,r29 ; Get the physent address
2803 bl mapPhysUnlock ; Unlock the physent
2805 mtmsrd r26 ; Restore translation/mode/etc.
2808 hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2809 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2810 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2811 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2812 mr r3,r30 ; Pass back the RC
2813 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2814 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2815 mtlr r0 ; Restore the return
2816 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2817 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2818 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2819 lwz r1,0(r1) ; Pop the stack
2824 ; The preop/op/postop function table.
2825 ; Each function is allotted a 128-byte slot and must be no more than
2826 ; 32 instructions. If more than 32, we must fix address calculations
2827 ; at the start of hwpOpBase
2829 ; The routine must set CR0_EQ in order to continue scan.
2830 ; If CR0_EQ is not set, an early return from the function is made.
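;
;			The dispatcher in hw_walk_phys turns a function number into an entry
;			address roughly like this (hypothetical helper; the real code does it
;			with slwi/add/mtctr):
;
;				void *hwp_entry(unsigned int fn) {
;					return (void *)((uintptr_t)hwpOpBase + (fn << 7));   // 128 bytes per slot
;				}
;
;			Each routine reports "keep scanning" by returning with CR0_EQ set;
;			returning with CR0_EQ clear sends hw_walk_phys down its early-exit path.
;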
2837 ; Function 0 - No operation
2839 hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2840 blr ; Just return...
2844 ; This is the continuation of function 4 - Set attributes in mapping
2846 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2847 ; NOTE: Do we have to deal with i-cache here?
2849 hwpSAM: li r11,4096 ; Get page size
2851 hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2852 dcbf r11,r5 ; Flush the line in the data cache
2853 bgt++ hwpSAMinvd ; Go do the rest of it...
2855 sync ; Make sure it is done
2857 li r11,4096 ; Get page size
2859 hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2860 icbi r11,r5 ; Flush the line in the icache
2861 bgt++ hwpSAMinvi ; Go do the rest of it...
2863 sync ; Make sure it is done
2865 cmpw r0,r0 ; Make sure we return CR0_EQ
2869 ; Function 1 - Set protection in physent (obsolete)
2871 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2873 hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
2877 ; Function 2 - Set protection in mapping
2879 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
2881 hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2882 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2883 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2884 li r0,lo16(mpN|mpPP) ; Get no-execute and protection bits
2885 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2886 rlwinm r2,r25,0,mpNb-32,mpPPe-32 ; Isolate new no-execute and protection bits
2887 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2888 andc r5,r5,r0 ; Clear the old no-execute and prot bits
2889 or r5,r5,r2 ; Move in the new no-execute and prot bits
2890 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2891 cmpw r0,r0 ; Make sure we return CR0_EQ
2892 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2895 ; Function 3 - Set attributes in physent
2897 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
2899 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2901 hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2902 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
2903 stwcx. r4,r5,r29 ; Try to stuff it
2904 bne-- hwpSAtrPhX ; Try again...
2905 ; Note: CR0_EQ is set because of stwcx.
2908 ; Function 4 - Set attributes in mapping
2910 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2912 hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2913 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2914 li r2,mpM ; Force on coherent
2915 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2916 li r0,lo16(mpWIMG) ; Get wimg mask
2917 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2918 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2919 ; Copy in the cache inhibited bit
2920 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2921 andc r5,r5,r0 ; Clear the old wimg
2922 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2923 ; Copy in the guarded bit
2924 mfsprg r9,2 ; Feature flags
2925 or r5,r5,r2 ; Move in the new wimg
2926 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2927 lwz r2,mpPAddr(r31) ; Get the physical address
2928 li r0,0xFFF ; Start a mask
2929 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2930 rlwinm r5,r0,0,1,0 ; Copy to top half
2931 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2932 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2933 and r5,r5,r2 ; Clean stuff in top 32 bits
2934 andc r2,r2,r0 ; Clean bottom too
2935 rlwimi r5,r2,0,0,31 ; Insert low 32 to make full physical address
2936 b hwpSAM ; Join common
2938 ; NOTE: we moved the remainder of the code out of here because it
2939 ; did not fit in the 128 bytes allotted. It got stuck into the free space
2940 ; at the end of the no-op function.
2945 ; Function 5 - Clear reference in physent
2947 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
2949 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2951 hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2952 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2953 stwcx. r4,r5,r29 ; Try to stuff it
2954 bne-- hwpCRefPhX ; Try again...
2955 ; Note: CR0_EQ is set because of stwcx.
2959 ; Function 6 - Clear reference in mapping
2961 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
2963 hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2964 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2965 andc r5,r5,r0 ; Clear in PTE copy
2966 andc r8,r8,r0 ; and in the mapping
2967 cmpw r0,r0 ; Make sure we return CR0_EQ
2968 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2972 ; Function 7 - Clear change in physent
2974 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
2976 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2978 hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2979 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2980 stwcx. r4,r5,r29 ; Try to stuff it
2981 bne-- hwpCCngPhX ; Try again...
2982 ; Note: CR0_EQ is set because of stwcx.
2986 ; Function 8 - Clear change in mapping
2988 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2990 hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2991 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2992 andc r5,r5,r0 ; Clear in PTE copy
2993 andc r8,r8,r0 ; and in the mapping
2994 cmpw r0,r0 ; Make sure we return CR0_EQ
2995 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2999 ; Function 9 - Set reference in physent
3001 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
3003 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3005 hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
3006 ori r4,r4,lo16(ppR) ; Set the reference
3007 stwcx. r4,r5,r29 ; Try to stuff it
3008 bne-- hwpSRefPhX ; Try again...
3009 ; Note: CR0_EQ is set because of stwcx.
3013 ; Function 10 - Set reference in mapping
3015 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3017 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3018 ori r8,r8,lo16(mpR) ; Set reference in mapping
3019 cmpw r0,r0 ; Make sure we return CR0_EQ
3020 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3023 ; Function 11 - Set change in physent
3025 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
3027 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3029 hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3030 ori r4,r4,lo16(ppC) ; Set the change bit
3031 stwcx. r4,r5,r29 ; Try to stuff it
3032 bne-- hwpSCngPhX ; Try again...
3033 ; Note: CR0_EQ is set because of stwcx.
3036 ; Function 12 - Set change in mapping
3038 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
3040 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3041 ori r8,r8,lo16(mpC) ; Set change in mapping
3042 cmpw r0,r0 ; Make sure we return CR0_EQ
3043 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3046 ; Function 13 - Test reference in physent
3048 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3050 hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3051 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3052 blr ; Return (CR0_EQ set to continue if reference is off)...
3055 ; Function 14 - Test reference in mapping
3057 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
3059 hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3060 blr ; Return (CR0_EQ set to continue if reference is off)...
3063 ; Function 15 - Test change in physent
3065 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
3067 hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3068 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
3069 blr ; Return (CR0_EQ set to continue if change is off)...
3072 ; Function 16 - Test change in mapping
3074 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
3076 hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
3077 blr ; Return (CR0_EQ set to continue if change is off)...
3080 ; Function 17 - Test reference and change in physent
3082 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3085 lwz r0,ppLink+4(r29) ; Get the flags from physent
3086 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3087 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3088 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3089 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3092 ; Function 18 - Test reference and change in mapping
3094 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3096 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3097 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3098 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3099 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3102 ; Function 19 - Clear reference and change in physent
3104 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3106 li r5,ppLink+4 ; Get offset for flag part of physent
3109 lwarx r4,r5,r29 ; Get the old flags
3110 andc r4,r4,r25 ; Clear R and C as specified by mask
3111 stwcx. r4,r5,r29 ; Try to stuff it
3112 bne-- hwpCRefCngPhX ; Try again...
3113 ; Note: CR0_EQ is set because of stwcx.
3117 ; Function 20 - Clear reference and change in mapping
3119 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3121 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3122 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3123 andc r5,r5,r0 ; Clear in PTE copy
3124 andc r8,r8,r0 ; and in the mapping
3125 cmpw r0,r0 ; Make sure we return CR0_EQ
3126 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3130 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
3133 ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
3136 ; mapRtOK - if all is ok
3137 ; mapRtBadLk - if mapping lock fails
3138 ; mapRtPerm - if mapping is permanent
3139 ; mapRtNotFnd - if mapping is not found
3140 ; mapRtBlock - if mapping is a block
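;
;			A C-level sketch of the path below (illustrative only: being_removed(),
;			is_block() and is_permanent() summarize the mpRIP/mpType/mpPerm tests,
;			and set_pp()/revalidate() stand for the mpVAddr low-byte update and the
;			PTE rebuild in the 32-/64-bit paths):
;
;				if (sxlkShared(&pmap->pmapSXlk) != 0) return mapRtBadLk;
;				mp = mapSearch(pmap, va, &next);
;				if (mp == 0 || being_removed(mp)) rc = mapRtNotFnd;
;				else if (is_block(mp))            rc = mapRtBlock;
;				else if (is_permanent(mp))        rc = mapRtPerm;
;				else {
;					pte = mapInvPte(mp);          // pull the PTE, merge RC into the physent
;					set_pp(mp, prot);             // new protection into the mapping copy
;					if (pte) revalidate(pte, prot);
;					rc = mapRtOK;
;				}
;				sxlkUnlock(&pmap->pmapSXlk);
;				*nextva = next;
;				return rc;
;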
3143 .globl EXT(hw_protect)
3146 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3147 mflr r0 ; Save the link register
3148 stw r24,FM_ARG0+0x00(r1) ; Save a register
3149 stw r25,FM_ARG0+0x04(r1) ; Save a register
3150 mr r25,r7 ; Remember address of next va
3151 stw r26,FM_ARG0+0x08(r1) ; Save a register
3152 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3153 stw r28,FM_ARG0+0x10(r1) ; Save a register
3154 mr r24,r6 ; Save the new protection flags
3155 stw r29,FM_ARG0+0x14(r1) ; Save a register
3156 stw r30,FM_ARG0+0x18(r1) ; Save a register
3157 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3158 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3161 lwz r11,pmapFlags(r3) ; Get pmaps flags
3162 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3163 bne hpPanic ; Call not valid for guest shadow assist pmap
3166 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3167 lwz r7,pmapvr+4(r3) ; Get the second part
3170 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3172 mr r27,r11 ; Remember the old MSR
3173 mr r26,r12 ; Remember the feature bits
3175 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3177 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3179 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3181 hpSF1: mr r29,r4 ; Save top half of vaddr
3182 mr r30,r5 ; Save the bottom half
3184 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3185 bl sxlkShared ; Go get a shared lock on the mapping lists
3186 mr. r3,r3 ; Did we get the lock?
3187 bne-- hpBadLock ; Nope...
3189 mr r3,r28 ; get the pmap address
3190 mr r4,r29 ; Get bits 0:31 to look for
3191 mr r5,r30 ; Get bits 32:64
3193 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
3195 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3196 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3197 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3198 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3199 mr. r31,r3 ; Save the mapping if we found it
3200 mr r29,r4 ; Save next va high half
3201 mr r30,r5 ; Save next va low half
3203 beq-- hpNotFound ; Not found...
3205 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
3207 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3209 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3211 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
3212 mr. r3,r3 ; Was there a previously valid PTE?
3214 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3216 beq-- hpNoOld32 ; Nope...
3218 stw r5,4(r3) ; Store second half of PTE
3219 eieio ; Make sure we do not reorder
3220 stw r4,0(r3) ; Revalidate the PTE
3222 eieio ; Make sure all updates come first
3223 stw r6,0(r7) ; Unlock PCA
3225 hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3226 bl sxlkUnlock ; Unlock the search list
3228 li r3,mapRtOK ; Set normal return
3229 b hpR32 ; Join common...
3234 hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3236 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
3237 mr. r3,r3 ; Was there a previously valid PTE?
3239 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3241 beq-- hpNoOld64 ; Nope...
3243 std r5,8(r3) ; Store second half of PTE
3244 eieio ; Make sure we do not reorder
3245 std r4,0(r3) ; Revalidate the PTE
3247 eieio ; Make sure all updates come first
3248 stw r6,0(r7) ; Unlock PCA
3250 hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3251 bl sxlkUnlock ; Unlock the search list
3253 li r3,mapRtOK ; Set normal return
3254 b hpR64 ; Join common...
3258 hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3260 hpR32: mtmsr r27 ; Restore enables/translation/etc.
3262 b hpReturnC ; Join common...
3264 hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3267 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3268 stw r30,4(r25) ; Save the bottom of the next va
3269 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3270 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3271 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3272 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3273 mtlr r0 ; Restore the return
3274 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3275 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3276 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3277 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3278 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3279 lwz r1,0(r1) ; Pop the stack
3284 hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3285 b hpReturn ; Leave....
3287 hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3288 bl sxlkUnlock ; Unlock the search list
3290 li r3,mapRtNotFnd ; Set that we did not find the requested page
3291 b hpReturn ; Leave....
3294 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3295 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3296 bne-- hpNotFound ; Yeah...
3297 bl sxlkUnlock ; Unlock the search list
3299 li r3,mapRtBlock ; Assume it was a block
3300 rlwinm r0,r7,0,mpType ; Isolate mapping type
3301 cmplwi r0,mpBlock ; Is this a block mapping?
3302 beq++ hpReturn ; Yes, leave...
3304 li r3,mapRtPerm ; Set that we hit a permanent page
3305 b hpReturn ; Leave....
3307 hpPanic: lis r0,hi16(Choke) ; System abend
3308 ori r0,r0,lo16(Choke) ; System abend
3309 li r3,failMapping ; Show that we failed some kind of mapping thing
3314 ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3316 ; Returns one of the following codes ORed with the RC bits from the mapping
3317 ; mapRtOK - if all is ok
3318 ; mapRtBadLk - if mapping lock fails
3319 ; mapRtNotFnd - if mapping is not found
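;
;			A rough C sketch of the flow below (illustrative only; names mirror the assembly):
;
;				unsigned int hw_test_rc(pmap_t pmap, addr64_t va, boolean_t reset) {
;					if (sxlkShared(&pmap->pmapSXlk) != 0) return mapRtBadLk;
;					mapping_t *mp = mapSearch(pmap, va);
;					if (mp == NULL || special(mp)) { unlock(); return mapRtNotFnd; }
;					unsigned int rc = mapInvPte(mp) & (mpR | mpC);	// invalidate PTE, capture the current RC bits
;					if (reset) clear_rc(mp);			// clear RC in the mapping and the PTE copy
;					revalidate_pte_if_it_existed();
;					unlock();
;					return mapRtOK | rc;				// return code ORed with the saved RC bits
;				}
;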
3322 .globl EXT(hw_test_rc)
3325 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3326 mflr r0 ; Save the link register
3327 stw r24,FM_ARG0+0x00(r1) ; Save a register
3328 stw r25,FM_ARG0+0x04(r1) ; Save a register
3329 stw r26,FM_ARG0+0x08(r1) ; Save a register
3330 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3331 stw r28,FM_ARG0+0x10(r1) ; Save a register
3332 mr r24,r6 ; Save the reset request
3333 stw r29,FM_ARG0+0x14(r1) ; Save a register
3334 stw r30,FM_ARG0+0x18(r1) ; Save a register
3335 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3336 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3339 lwz r11,pmapFlags(r3) ; Get pmaps flags
3340 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3341 bne htrPanic ; Call not valid for guest shadow assist pmap
3344 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3345 lwz r7,pmapvr+4(r3) ; Get the second part
3348 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3350 mr r27,r11 ; Remember the old MSR
3351 mr r26,r12 ; Remember the feature bits
3353 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3355 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
3357 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3359 htrSF1: mr r29,r4 ; Save top half of vaddr
3360 mr r30,r5 ; Save the bottom half
3362 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3363 bl sxlkShared ; Go get a shared lock on the mapping lists
3364 mr. r3,r3 ; Did we get the lock?
3366 bne-- htrBadLock ; Nope...
3368 mr r3,r28 ; get the pmap address
3369 mr r4,r29 ; Get bits 0:31 to look for
3370 mr r5,r30 ; Get bits 32:64
3372 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
3374 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3375 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3376 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3377 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3378 mr. r31,r3 ; Save the mapping if we found it
3379 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
3381 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
3383 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3385 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3387 cmplwi cr1,r24,0 ; Do we want to clear RC?
3388 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3389 mr. r3,r3 ; Was there a previously valid PTE?
3390 li r0,lo16(mpR|mpC) ; Get bits to clear
3392 and r25,r5,r0 ; Save the RC bits
3393 beq++ cr1,htrNoClr32 ; Nope...
3395 andc r12,r12,r0 ; Clear mapping copy of RC
3396 andc r5,r5,r0 ; Clear PTE copy of RC
3397 sth r12,mpVAddr+6(r31) ; Set the new RC
3399 htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
3401 sth r5,6(r3) ; Store updated RC
3402 eieio ; Make sure we do not reorder
3403 stw r4,0(r3) ; Revalidate the PTE
3405 eieio ; Make sure all updates come first
3406 stw r6,0(r7) ; Unlock PCA
3408 htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3409 bl sxlkUnlock ; Unlock the search list
3410 li r3,mapRtOK ; Set normal return
3411 b htrR32 ; Join common...
3416 htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3418 cmplwi cr1,r24,0 ; Do we want to clear RC?
3419 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3420 mr. r3,r3 ; Was there a previously valid PTE?
3421 li r0,lo16(mpR|mpC) ; Get bits to clear
3423 and r25,r5,r0 ; Save the RC bits
3424 beq++ cr1,htrNoClr64 ; Nope...
3426 andc r12,r12,r0 ; Clear mapping copy of RC
3427 andc r5,r5,r0 ; Clear PTE copy of RC
3428 sth r12,mpVAddr+6(r31) ; Set the new RC
3430 htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3432 sth r5,14(r3) ; Store updated RC
3433 eieio ; Make sure we do not reorder
3434 std r4,0(r3) ; Revalidate the PTE
3436 eieio ; Make sure all updates come first
3437 stw r6,0(r7) ; Unlock PCA
3439 htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3440 bl sxlkUnlock ; Unlock the search list
3441 li r3,mapRtOK ; Set normal return
3442 b htrR64 ; Join common...
3446 htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
3448 htrR32: mtmsr r27 ; Restore enables/translation/etc.
3450 b htrReturnC ; Join common...
3452 htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3455 htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3456 or r3,r3,r25 ; Send the RC bits back
3457 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3458 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3459 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3460 mtlr r0 ; Restore the return
3461 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3462 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3463 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3464 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3465 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3466 lwz r1,0(r1) ; Pop the stack
3471 htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3472 b htrReturn ; Leave....
3475 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3476 bl sxlkUnlock ; Unlock the search list
3478 li r3,mapRtNotFnd ; Set that we did not find the requested page
3479 b htrReturn ; Leave....
3481 htrPanic: lis r0,hi16(Choke) ; System abend
3482 ori r0,r0,lo16(Choke) ; System abend
3483 li r3,failMapping ; Show that we failed some kind of mapping thing
3489 ; mapFindLockPN - find and lock physent for a given page number
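;
;			Roughly, in illustrative C (names follow the assembly): scan the pmap_mem_regions
;			bank table for the bank whose mrStart/mrEnd range contains the page number, then
;			index that bank's physent table and take the physent lock:
;
;				for (bank = pmap_mem_regions; bank->mrPhysTab != NULL; bank++) {
;					if (ppnum >= bank->mrStart && ppnum <= bank->mrEnd) {
;						physent = bank->mrPhysTab + (ppnum - bank->mrStart);	// 8 bytes per physent
;						return mapPhysLock(physent);
;					}
;				}
;				return NULL;						// page is not in any bank
;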
3494 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3495 mr r2,r3 ; Save our target
3496 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3498 mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3499 lwz r5,mrStart(r9) ; Get start of table entry
3500 lwz r0,mrEnd(r9) ; Get end of table entry
3501 addi r9,r9,mrSize ; Point to the next slot
3502 cmplwi cr7,r3,0 ; Are we at the end of the table?
3503 cmplw r2,r5 ; See if we are in this table
3504 cmplw cr1,r2,r0 ; Check end also
3505 sub r4,r2,r5 ; Calculate index to physical entry
3506 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
3507 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3508 slwi r4,r4,3 ; Get offset to physical entry
3510 blt-- mapFLPNitr ; Did not find it...
3512 add r3,r3,r4 ; Point right to the slot
3513 b mapPhysLock ; Join common lock code
3516 li r3,0 ; Show that we did not find it
3521 ; mapPhysFindLock - find physent list and lock it
3522 ; R31 points to mapping
3527 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3528 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3529 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
3530 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3531 add r3,r3,r4 ; Point to table entry
3532 lwz r5,mpPAddr(r31) ; Get physical page number
3533 lwz r7,mrStart(r3) ; Get the start of range
3534 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3535 sub r6,r5,r7 ; Get index to physent
3536 rlwinm r6,r6,3,0,28 ; Get offset to physent
3537 add r3,r3,r6 ; Point right to the physent
3538 b mapPhysLock ; Join in the lock...
3541 ; mapPhysLock - lock a physent list
3542 ; R3 contains list header
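;
;			The lock is the high-order bit of the physent's ppLink word. In effect
;			(illustrative C; the real code uses lwarx/stwcx. and spins on a plain
;			load while the lock is held so it does not thrash the reservation):
;
;				do {
;					while (physent->ppLink & 0x80000000) ;		// wait without reserving
;				} while (!atomically_set_bit0(&physent->ppLink));	// then try to take the lock
;				isync();						// discard speculative loads
;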
3547 li r2,lgKillResv ; Get a spot to kill reservation
3548 stwcx. r2,0,r2 ; Kill it...
3551 lwz r2,ppLink(r3) ; Get physent chain header
3552 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3553 bne-- mapPhysLockT ; Nope, still locked...
3556 lwarx r2,0,r3 ; Get the lock
3557 rlwinm. r0,r2,0,0,0 ; Is it locked?
3558 oris r0,r2,0x8000 ; Set the lock bit
3559 bne-- mapPhysLockS ; It is locked, spin on it...
3560 stwcx. r0,0,r3 ; Try to stuff it back...
3561 bne-- mapPhysLock ; Collision, try again...
3562 isync ; Clear any speculations
3567 ; mapPhysUnlock - unlock a physent list
3568 ; R3 contains list header
3573 lwz r0,ppLink(r3) ; Get physent chain header
3574 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3575 eieio ; Make sure unlock comes last
3576 stw r0,ppLink(r3) ; Unlock the list
3580 ; mapPhysMerge - merge the RC bits into the master copy
3581 ; R3 points to the physent
3582 ; R4 contains the RC bits
3584 ; Note: we just return if RC is 0
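;
;			In effect (illustrative C): atomically OR the R and C bits passed in R4 into
;			the master copy kept in the low word of the physent's ppLink field:
;
;				if ((rc & (mpR | mpC)) == 0) return;			// nothing to merge
;				do {
;					old = load_reserved(&physent->ppLink_low);
;				} while (!store_conditional(&physent->ppLink_low, old | rc));
;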
3589 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3590 la r5,ppLink+4(r3) ; Point to the RC field
3591 beqlr-- ; Leave if RC is 0...
3594 lwarx r6,0,r5 ; Get the RC part
3595 or r6,r6,r4 ; Merge in the RC
3596 stwcx. r6,0,r5 ; Try to stuff it back...
3597 bne-- mapPhysMergeT ; Collision, try again...
3601 ; Sets the physent link pointer and preserves all flags
3602 ; The list is locked
3603 ; R3 points to physent
3604 ; R4 has link to set
3610 la r5,ppLink+4(r3) ; Point to the link word
3613 lwarx r2,0,r5 ; Get the link and flags
3614 rlwimi r4,r2,0,ppFlags ; Insert the flags
3615 stwcx. r4,0,r5 ; Stick them back
3616 bne-- mapPhyCSetR ; Someone else did something, try again...
3622 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3623 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
3626 ldarx r2,0,r3 ; Get the link and flags
3627 and r5,r2,r0 ; Isolate the flags
3628 or r6,r4,r5 ; Add them to the link
3629 stdcx. r6,0,r3 ; Stick them back
3630 bne-- mapPhyCSet64x ; Someone else did something, try again...
3634 ; mapBumpBusy - increment the busy count on a mapping
3635 ; R3 points to mapping
3641 lwarx r4,0,r3 ; Get mpBusy
3642 addis r4,r4,0x0100 ; Bump the busy count
3643 stwcx. r4,0,r3 ; Save it back
3644 bne-- mapBumpBusy ; This did not work, try again...
3648 ; mapDropBusy - decrement the busy count on a mapping
3649 ; R3 points to mapping
3652 .globl EXT(mapping_drop_busy)
3655 LEXT(mapping_drop_busy)
3657 lwarx r4,0,r3 ; Get mpBusy
3658 addis r4,r4,0xFF00 ; Drop the busy count
3659 stwcx. r4,0,r3 ; Save it back
3660 bne-- mapDropBusy ; This did not work, try again...
3664 ; mapDrainBusy - drain the busy count on a mapping
3665 ; R3 points to mapping
3666 ; Note: we already have a busy for ourselves. Only one
3667 ; busy per processor is allowed, so we just spin here
3668 ; waiting for the count to drop to 1.
3669 ; Also, the mapping can not be on any lists when we do this
3670 ; so all we are doing is waiting until it can be released.
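;
;			In effect (illustrative C): the busy count lives in the high-order byte of
;			mpFlags, and we already hold one busy reference, so we simply wait for the
;			count to drain down to ours:
;
;				while (((mp->mpFlags >> 24) & 0xFF) != 1) ;		// spin until only our busy remains
;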
3676 lwz r4,mpFlags(r3) ; Get mpBusy
3677 rlwinm r4,r4,8,24,31 ; Clean it up
3678 cmplwi r4,1 ; Is it just our busy?
3679 beqlr++ ; Yeah, it is clear...
3680 b mapDrainBusy ; Try again...
3685 ; handleDSeg - handle a data segment fault
3686 ; handleISeg - handle an instruction segment fault
3688 ; All that we do here is to map these to DSI or ISI and ensure
3689 ; that the hash bit is not set. This forces the fault code
3690 ; to also handle the missing segment.
3692 ; At entry R2 contains per_proc, R13 contains savearea pointer,
3693 ; and R11 is the exception code.
3697 .globl EXT(handleDSeg)
3701 li r11,T_DATA_ACCESS ; Change fault to DSI
3702 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3703 b EXT(handlePF) ; Join common...
3706 .globl EXT(handleISeg)
3710 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3711 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3712 b EXT(handlePF) ; Join common...
3716 * handlePF - handle a page fault interruption
3718 * At entry R2 contains per_proc, R13 contains savearea pointer,
3719 * and R11 is the exception code.
3721 * This first part does a quick check to see if we can handle the fault.
3722 * We cannot handle any kind of protection exception here, so we pass
3723 * them up to the next level.
3725 * NOTE: In order for a page-fault redrive to work, the translation miss
3726 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3727 * before we come here.
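 *
 *	An illustrative pseudo-C outline of the path below (not the literal control flow):
 *
 *		if (dsisr & (dsiNoEx|dsiProt|dsiInvMode|dsiAC)) return to next level;	// cannot handle protection faults
 *		pmap = (msr & (MSR_PR|MSR_RI)) ? per_proc->ppUserPmap : kernel_pmap_phys;
 *		for (depth = 64; depth != 0; depth--) {					// descend nested pmaps
 *			mp = mapSearch(pmap, va);
 *			if (mp == NULL) return to next level;				// genuinely unmapped
 *			if (!nested(mp) && !linkage(mp)) break;				// found the terminal mapping
 *			va += mp->mpNestReloc; pmap = pmapTrans[mp->mpSpace];		// relocate and descend
 *		}
 *		load_segment_or_slb_if_missing();					// SR on 32-bit, SLB entry on 64-bit
 *		build_pte_and_insert_into_hash_table();					// possibly stealing a PTEG slot
 *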
3731 .globl EXT(handlePF)
3735 mfsprg r12,2 ; Get feature flags
3736 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3737 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3738 mtcrf 0x02,r12 ; move pf64Bit to cr6
3739 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3740 lwz r18,SAVflags(r13) ; Get the flags
3742 beq-- gotIfetch ; We have an IFETCH here...
3744 lwz r27,savedsisr(r13) ; Get the DSISR
3745 lwz r29,savedar(r13) ; Get the first half of the DAR
3746 lwz r30,savedar+4(r13) ; And second half
3748 b ckIfProt ; Go check if this is a protection fault...
3750 gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3751 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3752 lwz r30,savesrr0+4(r13) ; And second half
3753 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3755 ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3756 li r20,64 ; Set a limit of 64 nests for sanity check
3757 bne-- hpfExit ; Yes... (probably not though)
3760 ; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3761 ; should be loading the user pmap here.
3764 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3765 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3766 mr r19,r2 ; Remember the per_proc
3767 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3768 mr r23,r30 ; Save the low part of faulting address
3769 beq-- hpfInKern ; Skip if we are in the kernel
3770 la r8,ppUserPmap(r19) ; Point to the current user pmap
3772 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3774 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3777 ; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3778 ; predefined value that corresponds to no address space. When we see that value
3779 ; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3780 ; cause the proper SR to be loaded.
3783 lwz r28,4(r8) ; Pick up the pmap
3784 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3785 mr r25,r28 ; Save the original pmap (in case we nest)
3786 lwz r0,pmapFlags(r28) ; Get pmap's flags
3787 bne hpfGVtest ; Segs are not ours if so...
3788 mfsrin r4,r30 ; Get the SR that was used for translation
3789 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3790 bne++ hpfGVtest ; No...
3792 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3793 b hpfGVtest ; Join on up...
3797 nop ; Push hpfNest to a 32-byte boundary
3798 nop ; Push hpfNest to a 32-byte boundary
3799 nop ; Push hpfNest to a 32-byte boundary
3801 hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3802 mr r25,r28 ; Save the original pmap (in case we nest)
3803 lwz r0,pmapFlags(r28) ; Get pmap's flags
3805 hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3806 bne hpfGVxlate ; Yup, do accelerated shadow stuff
3809 ; This is where we loop descending nested pmaps
3812 hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3813 addi r20,r20,-1 ; Count nest try
3814 bl sxlkShared ; Go get a shared lock on the mapping lists
3815 mr. r3,r3 ; Did we get the lock?
3816 bne-- hpfBadLock ; Nope...
3818 mr r3,r28 ; Get the pmap pointer
3819 mr r4,r22 ; Get top of faulting vaddr
3820 mr r5,r23 ; Get bottom of faulting vaddr
3821 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3823 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3824 mr. r31,r3 ; Save the mapping if we found it
3825 cmplwi cr1,r0,0 ; Check for removal
3826 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3828 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3830 rlwinm r0,r7,0,mpType ; Isolate mapping type
3831 cmplwi r0,mpNest ; Are we again nested?
3832 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3833 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
3834 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3836 lhz r21,mpSpace(r31) ; Get the space
3838 bne++ hpfFoundIt ; No, we found our guy...
3841 #if pmapTransSize != 12
3842 #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3844 cmplwi r0,mpLinkage ; Linkage mapping?
3845 cmplwi cr1,r20,0 ; Too many nestings?
3846 beq-- hpfSpclNest ; Do we need to do special handling?
3848 hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3849 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3850 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3851 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3852 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3853 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3854 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3855 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3856 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3857 slwi r11,r21,3 ; Multiply space by 8
3858 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3859 addc r23,r23,r9 ; Relocate bottom half of vaddr
3860 lwz r10,0(r10) ; Get the actual translation map
3861 slwi r12,r21,2 ; Multiply space by 4
3862 add r10,r10,r11 ; Add in the higher part of the index
3863 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3864 adde r22,r22,r8 ; Relocate the top half of the vaddr
3865 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3866 bl sxlkUnlock ; Unlock the search list
3868 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
3869 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3870 cmplwi r28,0 ; Is the pmap paddr valid?
3871 bne+ hpfNest ; Nest into new pmap...
3872 b hpfBadPmap ; Handle bad pmap
3875 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3876 cmpldi r28,0 ; Is the pmap paddr valid?
3877 bne++ hpfNest ; Nest into new pmap...
3878 b hpfBadPmap ; Handle bad pmap
3882 ; Error condition. We only allow 64 nestings. This keeps us from having to
3883 ; check for recursive nests when we install them.
3889 lwz r20,savedsisr(r13) ; Get the DSISR
3890 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3891 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3892 ori r20,r20,1 ; Indicate that there was a nesting problem
3893 stw r20,savedsisr(r13) ; Stash it
3894 lwz r11,saveexception(r13) ; Restore the exception code
3895 b EXT(PFSExit) ; Yes... (probably not though)
3898 ; Error condition - lock failed - this is fatal
3904 lis r0,hi16(Choke) ; System abend
3905 ori r0,r0,lo16(Choke) ; System abend
3906 li r3,failMapping ; Show mapping failure
3910 ; Error condition - space id selected an invalid pmap - fatal
3916 lis r0,hi16(Choke) ; System abend
3917 ori r0,r0,lo16(Choke) ; System abend
3918 li r3,failPmap ; Show invalid pmap
3922 ; Did not find any kind of mapping
3928 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3929 bl sxlkUnlock ; Unlock it
3930 lwz r11,saveexception(r13) ; Restore the exception code
3932 hpfExit: ; We need this because we can not do a relative branch
3933 b EXT(PFSExit) ; Yes... (probably not though)
3937 ; Here is where we handle special mappings. So far, the only use is to load a
3938 ; processor specific segment register for copy in/out handling.
3940 ; The only (so far implemented) special map is used for copyin/copyout.
3941 ; We keep a mapping of a "linkage" mapping in the per_proc.
3942 ; The linkage mapping is basically a nested pmap that is switched in
3943 ; as part of context switch. It relocates the appropriate user address
3944 ; space slice into the right place in the kernel.
3950 la r31,ppUMWmp(r19) ; Just point to the mapping
3951 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
3952 b hpfCSrch ; Go continue search...
3956 ; We have now found a mapping for the address we faulted on.
3960 ; Here we go about calculating what the VSID should be. We concatenate
3961 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3962 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3963 ; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3966 ; This is used both for segment handling and PTE handling
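;
;		As a simplified sketch (bit placement approximate; the real code also trims the
;		result to the supported VPN width and keeps a 4-bit pad at the bottom):
;
;			sid3 = sid | (sid << 14) | (sid << 28);		// three concatenated copies of the 14-bit space ID
;			vsid = sid3 ^ (high_order_ea_bits << 14);	// XOR against the shifted "segment" part of the vaddr
;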
3971 #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3974 ; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3975 ; when a multi-level mapping has been successfully searched):
3976 ; r21: home space id number
3977 ; r22: relocated high-order 32 bits of vaddr
3978 ; r23: relocated low-order 32 bits of vaddr
3979 ; r25: pmap physical address
3981 ; r28: home pmap physical address
3982 ; r29: high-order 32 bits of faulting vaddr
3983 ; r30: low-order 32 bits of faulting vaddr
3984 ; r31: mapping's physical address
3988 hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3989 hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3990 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3991 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3992 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3993 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
3994 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3995 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3996 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3997 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
3998 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3999 xor r14,r14,r20 ; Calculate the top half of VSID
4000 xor r15,r15,r21 ; Calculate the bottom half of the VSID
4001 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
4002 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
4003 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
4004 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
4005 or r12,r12,r15 ; Add key into the bottom of VSID
4007 ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4009 bne++ hpfPteMiss ; Nope, normal PTE miss...
4012 ; Here is the only place that we make an entry in the pmap segment cache.
4014 ; Note that we do not make an entry in the segment cache for special
4015 ; nested mappings. This makes the copy in/out segment get refreshed
4016 ; when switching threads.
4018 ; The first thing that we do is to look up the ESID we are going to load
4019 ; into a segment in the pmap cache. If it is already there, this is
4020 ; a segment that appeared since the last time we switched address spaces.
4021 ; If all is correct, then it was another processor that made the cache
4022 ; entry. If not, well, it is an error that we should die on, but I have
4023 ; not figured a good way to trap it yet.
4025 ; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4026 ; an entry based on the generation number, update the cache entry, and
4027 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4028 ; entries that correspond to the last 4 bits (32:35 for 64-bit and
4029 ; 0:3 for 32-bit) of the ESID.
4031 ; Then we unlock and bail.
4033 ; First lock it. Then select a free slot or steal one based on the generation
4034 ; number. Then store it, update the allocation flags, and unlock.
4036 ; The cache entry contains an image of the ESID/VSID pair we would load for
4037 ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4039 ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4040 ; the current one, which may have changed because we nested.
4042 ; Also remember that we do not store the valid bit in the ESID. If we
4043 ; do, this will break some other stuff.
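;
;		An illustrative C-level sketch of the policy described above (names follow the
;		assembly; the real code works on physical addresses with the cache locked):
;
;			if (pmapCacheLookup(pmap, esid) != NULL) goto done;	// another processor already entered it
;			slot = first_invalid_slot(pmapCCtl);			// a free slot, if any...
;			if (slot < 0) slot = generation % pmapSegCacheUse;	// ...otherwise steal one by generation
;			pmapSegCache[slot].sgcESID = esid;			// 64-bit ESID/VSID image (SR image derived for 32-bit)
;			pmapSegCache[slot].sgcVSID = vsid | key;
;			sub_tag[slot] = last_4_bits_of(esid);			// per-slot 4-bit sub-tag table
;			mark_valid(slot); bump_generation(); unlock();
;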
4046 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4048 mr r3,r25 ; Point to the pmap
4049 mr r4,r29 ; ESID high half
4050 mr r5,r30 ; ESID low half
4051 bl pmapCacheLookup ; Go see if this is in the cache already
4053 mr. r3,r3 ; Did we find it?
4054 mr r4,r11 ; Copy this to a different register
4056 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4058 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4059 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4061 cntlzw r7,r4 ; Find a free slot
4063 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4064 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4065 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4066 addi r5,r4,1 ; Bump the generation number
4067 and r7,r7,r6 ; Clear bit number if none empty
4068 andc r8,r4,r6 ; Clear generation count if we found an empty
4069 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4070 or r7,r7,r8 ; Select a slot number
4072 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4073 oris r8,r8,0x8000 ; Get the high bit on
4074 la r9,pmapSegCache(r25) ; Point to the segment cache
4075 slwi r6,r7,4 ; Get index into the segment cache
4076 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4077 srw r8,r8,r7 ; Get the mask
4078 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4080 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4081 oris r0,r0,0xF000 ; Get the sub-tag mask
4082 add r9,r9,r6 ; Point to the cache slot
4083 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift works for either half)
4084 srw r5,r30,r2 ; Slide sub-tag to right slot (shift works for either half)
4086 stw r29,sgcESID(r9) ; Save the top of the ESID
4087 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4088 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4089 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4090 or r10,r10,r5 ; Stick in subtag in case top half
4091 or r11,r11,r5 ; Stick in subtag in case bottom half
4092 stw r14,sgcVSID(r9) ; Save the top of the VSID
4093 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4094 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4095 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4097 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4098 b hpfNoCacheEnt ; Go finish up...
4101 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4105 eieio ; Make sure cache is updated before lock
4106 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4110 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4111 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4114 ; Make and enter 32-bit segment register
4117 lwz r16,validSegs(r19) ; Get the valid SR flags
4118 xor r12,r12,r4 ; Alter the storage key before loading segment register
4119 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4120 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4121 lis r0,0x8000 ; Set bit 0
4122 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4123 srw r0,r0,r2 ; Get bit corresponding to SR
4124 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4125 or r16,r16,r0 ; Show that SR is valid
4127 mtsrin r6,r30 ; Set the actual SR
4129 stw r16,validSegs(r19) ; Set the valid SR flags
4131 b hpfPteMiss ; SR loaded, go do a PTE...
4134 ; Make and enter 64-bit segment look-aside buffer entry.
4135 ; Note that the cache entry is the right format except for valid bit.
4136 ; We also need to convert from long long to 64-bit register values.
4143 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4144 sldi r8,r29,32 ; Move high order address over
4145 sldi r10,r14,32 ; Move high part of VSID over
4147 not r3,r16 ; Make valids be 0s
4148 li r0,1 ; Prepare to set bit 0
4150 cntlzd r17,r3 ; Find a free SLB
4151 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4152 or r9,r8,r30 ; Form full 64-bit address
4153 cmplwi r17,63 ; Did we find a free SLB entry?
4154 sldi r0,r0,63 ; Get bit 0 set
4155 or r10,r10,r12 ; Move in low part and keys
4156 addi r17,r17,1 ; Skip SLB 0 always
4157 blt++ hpfFreeSeg ; Yes, go load it...
4160 ; No free SLB entries, select one that is in use and invalidate it
4162 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4163 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4164 addi r4,r4,1 ; Set next slot to steal
4165 slbmfee r7,r17 ; Get the entry that is in the selected spot
4166 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4167 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4168 srawi r2,r2,31 ; Get -1 if steal index still in range
4169 slbie r7 ; Invalidate the in-use SLB entry
4170 and r4,r4,r2 ; Reset steal index when it should wrap
4173 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4175 ; We are now ready to stick the SLB entry in the SLB and mark it in use
4179 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4180 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4181 srd r0,r0,r4 ; Set bit mask for allocation
4182 oris r9,r9,0x0800 ; Turn on the valid bit
4183 or r16,r16,r0 ; Turn on the allocation flag
4184 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4186 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4187 slbie r7 ; Blow away a potential duplicate
4189 hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4191 std r16,validSegs(r19) ; Mark as valid
4192 b hpfPteMiss ; STE loaded, go do a PTE...
4195 ; The segment has been set up and loaded if need be. Now we are ready to build the
4196 ; PTE and get it into the hash table.
4198 ; Note that there is actually a race here. If we start fault processing on
4199 ; a different pmap, i.e., we have descended into a nested pmap, it is possible
4200 ; that the nest could have been removed from the original pmap. We would
4201 ; succeed with this translation anyway. I do not think we need to worry
4202 ; about this (famous last words) because nobody should be unnesting anything
4203 ; if there are still people actively using them. It should be up to the
4204 ; higher level VM system to put the kibosh on this.
4206 ; There is also another race here: if we fault on the same mapping on more than
4207 ; one processor at the same time, we could end up with multiple PTEs for the same
4208 ; mapping. This is not a good thing.... We really only need one of the
4209 ; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4210 ; the mapping. If we see that set, we just abandon the handler and hope that by
4211 ; the time we restore context and restart the interrupted code, the fault has
4212 ; been resolved by the other guy. If not, we will take another fault.
4216 ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4217 ; It is required to stay there until after we call mapSelSlot!!!!
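;
;	The flag manipulation below is, in effect (illustrative C):
;
;		do {
;			flags = load_reserved(&mp->mpFlags);
;			if ((flags & mpFIP) || pte_already_valid(mp)) goto hpfAbandon;	// another CPU owns or finished it
;		} while (!store_conditional(&mp->mpFlags, flags | mpFIP));		// otherwise claim the fault
;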
4222 hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4223 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4224 li r3,mpHValid ; Get the PTE valid bit
4225 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4226 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4227 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4228 and. r12,r12,r3 ; Isolate the valid bit
4229 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4230 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
4231 rlwinm r0,r2,0,mpType ; Isolate mapping type
4232 cmplwi r0,mpBlock ; Is this a block mapping?
4233 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
4234 stwcx. r2,0,r31 ; Store the flags
4235 bne-- hpfPteMiss ; Collision, try again...
4237 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4240 ; At this point we are about to do the 32-bit PTE generation.
4242 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4246 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4247 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4248 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4250 ; The 24 bits of the 32-bit architecture VSID is in the following:
4254 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4255 ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4256 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4261 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4262 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4264 mfsdr1 r27 ; Get the hash table base address
4266 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4267 rlwinm r18,r23,10,26,31 ; Extract the API
4268 xor r19,r15,r0 ; Calculate hash << 12
4269 mr r2,r25 ; Save the flag part of the mapping
4270 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4271 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4272 rlwinm r25,r25,0,0,19 ; Clear out the flags
4273 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4274 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4275 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4276 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4277 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4278 add r24,r24,r25 ; Adjust to true physical address
4279 rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image
4280 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4281 and r19,r19,r16 ; Wrap hash table offset into the hash table
4282 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4283 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4284 add r19,r19,r27 ; Point to the PTEG
4285 subfic r20,r20,-4 ; Get negative offset to PCA
4286 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4287 add r20,r20,r27 ; Point to the PCA slot
4290 ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4291 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4293 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4294 ; that some other processor beat us and stuck in a PTE or that
4295 ; all we had was a simple segment exception and the PTE was there the whole time.
4296 ; If we find a pointer, we are done.
4299 mr r7,r20 ; Copy the PCA pointer
4300 bl mapLockPteg ; Lock the PTEG
4302 lwz r12,mpPte(r31) ; Get the offset to the PTE
4303 mr r17,r6 ; Remember the PCA image
4304 mr r16,r6 ; Prime the post-select PCA image
4305 andi. r0,r12,mpHValid ; Is there a PTE here already?
4306 li r21,8 ; Get the number of slots
4308 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4310 bne- hpfBailOut ; Someone already did this for us...
4313 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4314 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4315 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4316 ; R4 returns the slot index.
4318 ; REMEMBER: CR7 indicates that we are building a block mapping.
4321 hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4322 mr r6,r17 ; Get back the original PCA
4323 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4324 blt- hpfBailOut ; Holy Cow, all slots are locked...
4326 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4328 cmplwi cr5,r3,1 ; Did we steal a slot?
4329 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4330 mr r16,r6 ; Remember the PCA image after selection
4331 blt+ cr5,hpfInser32 ; Nope, no steal...
4333 lwz r6,0(r19) ; Get the old PTE
4334 lwz r7,4(r19) ; Get the real part of the stealee
4335 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4336 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4337 srwi r3,r7,12 ; Change phys address to a ppnum
4338 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4339 cmplwi cr1,r3,0 ; Check if this is in RAM
4340 bne- hpfNoPte32 ; Could not get it, try for another...
4342 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4344 hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4346 sync ; Make sure the invalid is stored
4347 li r9,tlbieLock ; Get the TLBIE lock
4348 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4350 hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4351 mfsprg r4,0 ; Get the per_proc
4352 rlwinm r8,r6,25,18,31 ; Extract the space ID
4353 rlwinm r11,r6,25,18,31 ; Extract the space ID
4354 lwz r7,hwSteals(r4) ; Get the steal count
4355 srwi r2,r6,7 ; Align segment number with hash
4356 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4357 mr. r0,r0 ; Is it locked?
4358 srwi r0,r19,6 ; Align PTEG offset for back hash
4359 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4360 xor r11,r11,r0 ; Hash backwards to partial vaddr
4361 rlwinm r12,r2,14,0,3 ; Shift segment up
4362 mfsprg r2,2 ; Get feature flags
4363 li r0,1 ; Get our lock word
4364 rlwimi r12,r6,22,4,9 ; Move up the API
4365 bne- hpfTLBIE32 ; It is locked, go wait...
4366 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4368 stwcx. r0,0,r9 ; Try to get it
4369 bne- hpfTLBIE32 ; We was beat...
4370 addi r7,r7,1 ; Bump the steal count
4372 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4373 li r0,0 ; Lock clear value
4375 tlbie r12 ; Invalidate it everywhere
4378 beq- hpfNoTS32 ; Can not have MP on this machine...
4380 eieio ; Make sure that the tlbie happens first
4381 tlbsync ; Wait for everyone to catch up
4382 sync ; Make sure of it all
4384 hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4386 stw r7,hwSteals(r4) ; Save the steal count
4387 bgt cr5,hpfInser32 ; We just stole a block mapping...
4389 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4391 la r11,ppLink+4(r3) ; Point to the master RC copy
4392 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4393 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4395 hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4396 or r0,r0,r2 ; Merge in the new RC
4397 stwcx. r0,0,r11 ; Try to stick it back
4398 bne- hpfMrgRC32 ; Try again if we collided...
4401 hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4402 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4404 lhz r10,mpSpace(r7) ; Get the space
4405 lwz r9,mpVAddr+4(r7) ; And the vaddr
4406 cmplw cr1,r10,r8 ; Is this one of ours?
4407 xor r9,r12,r9 ; Compare virtual address
4408 cmplwi r9,0x1000 ; See if we really match
4409 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4410 beq+ hpfFPnch2 ; Yes, found ours...
4412 lwz r7,mpAlias+4(r7) ; Chain on to the next
4413 b hpfFPnch ; Check it out...
4415 hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4416 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4417 bl mapPhysUnlock ; Unlock the physent now
4419 hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4421 stw r24,4(r19) ; Stuff in the real part of the PTE
4422 eieio ; Make sure this gets there first
4424 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4425 mr r17,r16 ; Get the PCA image to save
4426 b hpfFinish ; Go join the common exit code...
4430 ; At this point we are about to do the 64-bit PTE generation.
4432 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4436 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4437 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4438 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4445 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4446 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4448 mfsdr1 r27 ; Get the hash table base address
4450 sldi r11,r22,32 ; Slide top of adjusted EA over
4451 sldi r14,r14,32 ; Slide top of VSID over
4452 rlwinm r5,r27,0,27,31 ; Isolate the size
4453 eqv r16,r16,r16 ; Get all foxes here
4454 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4455 mr r2,r10 ; Save the flag part of the mapping
4456 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4457 rldicr r27,r27,0,45 ; Clean up the hash table base
4458 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4459 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4460 subfic r5,r5,46 ; Get number of leading zeros
4461 xor r19,r0,r15 ; Calculate hash
4462 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4463 srd r16,r16,r5 ; Shift over to get length of table
4464 srdi r19,r19,5 ; Convert page offset to hash table offset
4465 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4466 rldicr r10,r10,0,51 ; Clear out flags
4467 sldi r24,r24,12 ; Change ppnum to physical address
4468 sub r11,r11,r10 ; Get the offset from the base mapping
4469 and r19,r19,r16 ; Wrap into hash table
4470 add r24,r24,r11 ; Get actual physical address of this page
4471 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4472 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4473 subfic r20,r20,-4 ; Get negative offset to PCA
4474 ori r24,r24,lo16(mpR) ; Force on the reference bit
4475 add r20,r20,r27 ; Point to the PCA slot
4476 add r19,r19,r27 ; Point to the PTEG
4479 ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4480 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4482 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4483 ; that some other processor beat us and stuck in a PTE or that
4484 ; all we had was a simple segment exception and the PTE was there the whole time.
4485 ; If we find a pointer, we are done.
4488 mr r7,r20 ; Copy the PCA pointer
4489 bl mapLockPteg ; Lock the PTEG
4491 lwz r12,mpPte(r31) ; Get the offset to the PTE
4492 mr r17,r6 ; Remember the PCA image
4493 mr r18,r6 ; Prime post-selection PCA image
4494 andi. r0,r12,mpHValid ; See if we have a PTE now
4495 li r21,8 ; Get the number of slots
4497 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4499 bne-- hpfBailOut ; Someone already did this for us...
4502 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4503 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4504 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4505 ; R4 returns the slot index.
4507 ; REMEMBER: CR7 indicates that we are building a block mapping.
4510 hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4511 mr r6,r17 ; Restore original state of PCA
4512 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4513 blt- hpfBailOut ; Holy Cow, all slots are locked...
4515 bl mapSelSlot ; Go select a slot
4517 cmplwi cr5,r3,1 ; Did we steal a slot?
4518 mr r18,r6 ; Remember the PCA image after selection
4519 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4520 lwz r10,hwSteals(r2) ; Get the steal count
4521 blt++ cr5,hpfInser64 ; Nope, no steal...
4523 ld r6,0(r19) ; Get the old PTE
4524 ld r7,8(r19) ; Get the real part of the stealee
4525 rldicr r6,r6,0,62 ; Clear the valid bit
4526 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4527 srdi r3,r7,12 ; Change physical address to a ppnum
4528 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4529 cmplwi cr1,r3,0 ; Check if this is in RAM
4530 bne-- hpfNoPte64 ; Could not get it, try for another...
4532 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4534 hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4535 li r9,tlbieLock ; Get the TLBIE lock
4537 srdi r11,r6,5 ; Shift VSID over for back hash
4538 mfsprg r4,0 ; Get the per_proc
4539 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4540 sync ; Make sure the invalid is stored
4542 sldi r12,r6,16 ; Move AVPN to EA position
4543 sldi r11,r11,5 ; Move this to the page position
4545 hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4546 mr. r0,r0 ; Is it locked?
4547 li r0,1 ; Get our lock word
4548 bne-- hpfTLBIE65 ; It is locked, go wait...
4550 stwcx. r0,0,r9 ; Try to get it
4551 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4552 rldicl r8,r6,52,50 ; Isolate the address space ID
4553 bne-- hpfTLBIE64 ; We was beat...
4554 addi r10,r10,1 ; Bump the steal count
4556 rldicl r11,r12,0,16 ; Clear cause the book says so
4557 li r0,0 ; Lock clear value
4559 tlbie r11 ; Invalidate it everywhere
4561 mr r7,r8 ; Get a copy of the space ID
4562 eieio ; Make sure that the tlbie happens first
4563 rldimi r7,r7,14,36 ; Copy address space to make hash value
4564 tlbsync ; Wait for everyone to catch up
4565 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4566 srdi r2,r6,26 ; Shift original segment down to bottom
4568 ptesync ; Make sure of it all
4569 xor r7,r7,r2 ; Compute original segment
4570 stw r0,tlbieLock(0) ; Clear the tlbie lock
4572 stw r10,hwSteals(r4) ; Save the steal count
4573 bgt cr5,hpfInser64 ; We just stole a block mapping...
4575 rldimi r12,r7,28,0 ; Insert decoded segment
4576 rldicl r4,r12,0,13 ; Trim to max supported address
4578 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4580 la r11,ppLink+4(r3) ; Point to the master RC copy
4581 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4582 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4584 hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4585 li r12,ppLFAmask ; Get mask to clean up alias pointer
4586 or r0,r0,r2 ; Merge in the new RC
4587 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4588 stwcx. r0,0,r11 ; Try to stick it back
4589 bne-- hpfMrgRC64 ; Try again if we collided...
4591 hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4592 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4594 lhz r10,mpSpace(r7) ; Get the space
4595 ld r9,mpVAddr(r7) ; And the vaddr
4596 cmplw cr1,r10,r8 ; Is this one of ours?
4597 xor r9,r4,r9 ; Compare virtual address
4598 cmpldi r9,0x1000 ; See if we really match
4599 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4600 beq++ hpfFPnch2x ; Yes, found ours...
4602 ld r7,mpAlias(r7) ; Chain on to the next
4603 b hpfFPnchx ; Check it out...
4607 hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4608 stwcx. r7,0,r7 ; Kill reservation
4610 hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4611 mr. r0,r0 ; Is it locked?
4612 beq++ hpfTLBIE64 ; Yup, wait for it...
4613 b hpfTLBIE63 ; Nope, try again..
4617 hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4618 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4619 bl mapPhysUnlock ; Unlock the physent now
4622 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4623 eieio ; Make sure this gets there first
4624 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4625 mr r17,r18 ; Get the PCA image to set
4626 b hpfFinish ; Go join the common exit code...
4629 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4630 ori r0,r0,lo16(Choke) ; System abend
4634 ; This is the common code we execute when we are finished setting up the PTE.
4639 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4640 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4641 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4642 stw r4,mpPte(r31) ; Remember our PTE
4644 hpfBailOut: eieio ; Make sure all updates come first
4645 stw r17,0(r20) ; Unlock and set the final PCA
4648 ; This is where we go if we have started processing the fault, but find that someone
4649 ; else has taken care of it.
4652 hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4653 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4654 sth r2,mpFlags+2(r31) ; Set it
4656 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4657 bl sxlkUnlock ; Unlock the search list
4659 li r11,T_IN_VAIN ; Say that it was handled
4660 b EXT(PFSExit) ; Leave...
4663 ; This is where we go when we find that someone else
4664 ; is in the process of handling the fault.
4667 hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4668 stwcx. r3,0,r3 ; Do it
4670 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4671 bl sxlkUnlock ; Unlock the search list
4673 li r11,T_IN_VAIN ; Say that it was handled
4674 b EXT(PFSExit) ; Leave...
4677 ; Guest shadow assist -- page fault handler
4679 ; Here we handle a fault in a guest pmap that has the guest shadow mapping
4680 ; assist active. We locate the VMM pmap extension block, which contains an
4681 ; index over the discontiguous multi-page shadow hash table. The index
4682 ; corresponding to our vaddr is selected, and the selected group within
4683 ; that page is searched for a valid and active entry that contains
4684 ; our vaddr and space id. The search is pipelined, so that we may fetch
4685 ; the next slot while examining the current slot for a hit. The final
4686 ; search iteration is unrolled so that we don't fetch beyond the end of
4687 ; our group, which could have dire consequences depending upon where the
4688 ; physical hash page is located.
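;
;	The hash and probe are, approximately (illustrative C; the shifts and masks are the
;	GV_* constants used in the code below):
;
;		hash  = space_id ^ (guest_va >> 12);
;		page  = hash_page_index[page_select(hash)];		// via GV_HPAGE_SHIFT/GV_HPAGE_MASK
;		group = page + group_select(hash);			// via GV_HGRP_SHIFT/GV_HGRP_MASK
;		for (slot = 0; slot < GV_SLOTS; slot++)
;			if (!(group[slot].mpFlags & (mpgFree|mpgDormant)) &&
;			      group[slot].mpSpace == space_id &&
;			     (group[slot].mpVAddr & ~0xFFF) == (guest_va & ~0xFFF))
;				return &group[slot];			// hit: rejoin at hpfGVfound
;		return NULL;						// miss: rejoin at hpfNotFound
;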
4690 ; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4691 ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4692 ; after the pmap_vmm_ext is the hash table physical address index, a
4693 ; linear list of 64-bit physical addresses of the pages that comprise
4696 ; In the event that we successfully locate a guest mapping, we re-join
4697 ; the page fault path at hpfGVfound with the mapping's address in r31;
4698 ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4699 ; a share of the pmap search lock for the host pmap with the host pmap's
4700 ; address in r28, the guest pmap's space id in r21, and the guest pmap's
4706 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4708 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4709 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4710 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4711 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4712 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4713 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4714 lwz r6,vxsGpf(r11) ; Get guest fault count
4716 srwi r3,r10,12 ; Form shadow hash:
4717 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4718 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4719 ; Form index offset from hash page number
4720 add r31,r31,r4 ; r31 <- hash page index entry
4721 lwz r31,4(r31) ; r31 <- hash page paddr
4722 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4723 ; r31 <- hash group paddr
4725 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4726 bl sxlkShared ; Go get a shared lock on the mapping lists
4727 mr. r3,r3 ; Did we get the lock?
4728 bne- hpfBadLock ; Nope...
4730 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4731 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4732 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4733 addi r6,r6,1 ; Increment guest fault count
4734 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4735 mtctr r0 ; in this group
4736 stw r6,vxsGpf(r11) ; Update guest fault count
4741 mr r6,r3 ; r6 <- current mapping slot's flags
4742 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4743 mr r7,r4 ; r7 <- current mapping slot's space ID
4744 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4745 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4746 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4747 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4748 xor r7,r7,r21 ; Compare space ID
4749 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4750 xor r8,r8,r10 ; Compare virtual address
4751 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4752 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4754 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4755 bdnz hpfGVlp32 ; Iterate
4757 clrrwi r5,r5,12 ; Remove flags from virtual address
4758 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4759 xor r4,r4,r21 ; Compare space ID
4760 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4761 xor r5,r5,r10 ; Compare virtual address
4762 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4763 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4769 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4770 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4771 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4772 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4773 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4774 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4775 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4776 lwz r6,vxsGpf(r11) ; Get guest fault count
4778 srwi r3,r10,12 ; Form shadow hash:
4779 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4780 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4781 ; Form index offset from hash page number
4782 add r31,r31,r4 ; r31 <- hash page index entry
4783 ld r31,0(r31) ; r31 <- hash page paddr
4784 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4785 ; r31 <- hash group paddr
4787 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4788 bl sxlkShared ; Go get a shared lock on the mapping lists
4789 mr. r3,r3 ; Did we get the lock?
4790 bne-- hpfBadLock ; Nope...
4792 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4793 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4794 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4795 addi r6,r6,1 ; Increment guest fault count
4796 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4797 mtctr r0 ; in this group
4798 stw r6,vxsGpf(r11) ; Update guest fault count
4803 mr r6,r3 ; r6 <- current mapping slot's flags
4804 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4805 mr r7,r4 ; r7 <- current mapping slot's space ID
4806 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4807 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4808 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4809 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4810 xor r7,r7,r21 ; Compare space ID
4811 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4812 xor r8,r8,r10 ; Compare virtual address
4813 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4814 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4816 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4817 bdnz hpfGVlp64 ; Iterate
4819 clrrdi r5,r5,12 ; Remove flags from virtual address
4820 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4821 xor r4,r4,r21 ; Compare space ID
4822 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4823 xor r5,r5,r10 ; Compare virtual address
4824 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4825 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4828 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4829 addi r6,r6,1 ; Increment miss count
4830 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
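;
;			The probe above can be summarized in C roughly as follows. This is an
;			illustration only, not kernel code: gv_slot_t, GV_SLOTS and the flag
;			values are hypothetical stand-ins for the real mapping layout and the
;			GV_* constants defined elsewhere.
;
;				#include <stdint.h>
;
;				#define GV_SLOTS 8                     /* assumed slots per hash group */
;				enum { mpgFree = 1, mpgDormant = 2 };  /* illustrative flag values */
;
;				typedef struct {                       /* simplified view of one slot */
;				    uint32_t flags;                    /* holds mpgFree / mpgDormant */
;				    uint16_t space;                    /* guest space ID */
;				    uint64_t vaddr;                    /* guest vaddr; low 12 bits are flags */
;				} gv_slot_t;
;
;				/* The group to probe is selected by the shadow hash, as in the code above:
;				 *    hash  = space ^ (uint32_t)(gva >> 12);
;				 *    page  = hash_page_index[high bits of hash];
;				 *    group = page + (low bits of hash) * group size;
;				 */
;				static gv_slot_t *gv_probe(gv_slot_t *group, uint32_t space, uint64_t gva)
;				{
;				    uint64_t page = gva & ~0xFFFULL;   /* page-aligned guest vaddr */
;				    for (int i = 0; i < GV_SLOTS; i++) {
;				        gv_slot_t *s = &group[i];
;				        if (!(s->flags & (mpgFree | mpgDormant)) &&
;				            s->space == space &&
;				            (s->vaddr & ~0xFFFULL) == page)
;				            return s;                  /* hit: join the common fault path */
;				    }
;				    return 0;                          /* miss: bump vxsGpfMiss, take the slow path */
;				}
;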
4834 * hw_set_user_space(pmap)
4835 * hw_set_user_space_dis(pmap)
4837 * Indicate whether memory space needs to be switched.
4838 * We really need to turn off interrupts here, because we need to be non-preemptable
4840 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4841 * register usage here. The VMM switch code in vmachmon.s that calls this
4842 * knows which registers are in use; check there if these change.
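;
;			A minimal C sketch of the switch described above, assuming simplified
;			per_proc fields; dss_all() stands in for the dssall instruction, and the
;			real code does all of this with interruptions disabled.
;
;				#include <stdint.h>
;
;				typedef struct {
;				    uint64_t user_pmap_phys;               /* ppUserPmap */
;				    void    *user_pmap_virt;               /* ppUserPmapVirt */
;				} per_proc_sketch_t;
;
;				static void dss_all(void) { /* stands in for dssall */ }
;
;				static void set_user_space_sketch(per_proc_sketch_t *pp, void *pmap,
;				                                  uint64_t pmap_phys, int has_altivec)
;				{
;				    void *prev = pp->user_pmap_virt;
;				    pp->user_pmap_phys = pmap_phys;        /* publish new pmap's real address */
;				    pp->user_pmap_virt = pmap;             /* and its virtual address */
;				    if (has_altivec && prev != pmap)
;				        dss_all();                         /* kill data streams on address space change */
;				}
;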
4848 .globl EXT(hw_set_user_space)
4850 LEXT(hw_set_user_space)
4852 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4853 mfmsr r10 ; Get the current MSR
4854 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4855 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4856 andc r10,r10,r8 ; Turn off VEC, FP for good
4857 andc r9,r10,r9 ; Turn off EE also
4858 mtmsr r9 ; Disable them
4859 isync ; Make sure FP and vec are off
4860 mfsprg r6,1 ; Get the current activation
4861 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4862 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4863 mfsprg r4,2 ; Get the feature flags
4864 lwz r7,pmapvr(r3) ; Get the v to r translation
4865 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4866 mtcrf 0x80,r4 ; Get the Altivec flag
4867 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4868 cmplw cr1,r3,r2 ; Same address space as before?
4869 stw r7,ppUserPmap(r6) ; Show our real pmap address
4870 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4871 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4872 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4873 mtmsr r10 ; Restore interruptions
4874 beqlr-- cr1 ; Leave if the same address space or not Altivec
4876 dssall ; Need to kill all data streams if adrsp changed
4881 .globl EXT(hw_set_user_space_dis)
4883 LEXT(hw_set_user_space_dis)
4885 lwz r7,pmapvr(r3) ; Get the v to r translation
4886 mfsprg r4,2 ; Get the feature flags
4887 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4888 mfsprg r6,1 ; Get the current activation
4889 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4890 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4891 mtcrf 0x80,r4 ; Get the Altivec flag
4892 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4893 cmplw cr1,r3,r2 ; Same address space as before?
4894 stw r7,ppUserPmap(r6) ; Show our real pmap address
4895 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4896 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4897 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4898 beqlr-- cr1 ; Leave if the same
4900 dssall ; Need to kill all data streams if adrsp changed
4904 /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4906 * Lock must already be held on mapping block list
4907 * returns 0 if all slots filled.
4908 * returns n if a slot is found and it is not the last
4909 * returns -n if a slot is found and it is the last
4910 * when n and -n are returned, the corresponding bit is cleared
4911 * the mapping is zeroed out before return
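;
;			For reference, a hedged C rendering of the scan (not the kernel's code);
;			clz32 models cntlzw, and a GCC-style __builtin_clz is assumed.
;
;				#include <stdint.h>
;
;				static int clz32(uint32_t x) { return x ? __builtin_clz(x) : 32; }
;
;				/* free[0]/free[1] model the two mbfree words; the chosen bit is cleared. */
;				static int mapalc1_sketch(uint32_t free[2])
;				{
;				    int n = clz32(free[0]);                   /* first set bit in word 0 */
;				    int word = 0;
;				    if (n == 32) {                            /* none there, try word 1 */
;				        n = clz32(free[1]);
;				        if (n == 32) return 0;                /* all slots filled */
;				        word = 1;
;				    }
;				    free[word] &= ~(0x80000000u >> n);        /* claim the slot */
;				    n += word * 32;
;				    return (free[0] | free[1]) ? n : -n;      /* negative means it was the last */
;				}
;
;			The negative return presumably lets the caller notice that the block just
;			became full and treat it accordingly.
;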
4919 lwz r4,mbfree(r3) ; Get the 1st mask
4920 lis r0,0x8000 ; Get the mask to clear the first free bit
4921 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4922 mr r12,r3 ; Save the block ptr
4923 cntlzw r3,r4 ; Get first 1-bit in 1st word
4924 srw. r9,r0,r3 ; Get bit corresponding to first free one
4925 cntlzw r10,r5 ; Get first free field in second word
4926 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4927 bne mapalc1f ; Found one in 1st word
4929 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4930 li r3,0 ; assume failure return
4931 andc r5,r5,r9 ; Turn it off
4932 beqlr-- ; There are no 1 bits left...
4933 addi r3,r10,32 ; set the correct number
4936 or. r0,r4,r5 ; any more bits set?
4937 stw r4,mbfree(r12) ; update bitmasks
4938 stw r5,mbfree+4(r12)
4940 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4942 dcbz r6,r12 ; clear the 64-byte mapping
4945 bnelr++ ; return if another bit remains set
4947 neg r3,r3 ; indicate we just returned the last bit
4951 /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4953 * Lock must already be held on mapping block list
4954 * returns 0 if all slots filled.
4955 * returns n if a slot is found and it is not the last
4956 * returns -n if a slot is found and it is the last
4957 * when n and -n are returned, the corresponding bits are cleared
4958 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4959 * the mapping is zeroed out before return
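;
;			A hedged C sketch of the 2-bit scan (illustration only): a run of two
;			adjacent 1 bits starts wherever (n & (n << 1)) is non-zero, and the run
;			that straddles the two words is handled as a special case, as below.
;
;				#include <stdint.h>
;
;				static int clz32(uint32_t x) { return x ? __builtin_clz(x) : 32; }
;
;				static int mapalc2_sketch(uint32_t free[2])
;				{
;				    int n = clz32(free[0] & (free[0] << 1));       /* run starting in word 0? */
;				    if (n < 32) {
;				        free[0] &= ~(0xC0000000u >> n);            /* clear both bits */
;				    } else {
;				        int m = clz32(free[1] & (free[1] << 1));   /* run starting in word 1? */
;				        if (m < 32) {
;				            free[1] &= ~(0xC0000000u >> m);
;				            n = m + 32;
;				        } else if ((free[0] & 1u) && (free[1] >> 31)) {
;				            free[0] &= ~1u;                        /* run spans the words: bit 31 */
;				            free[1] &= ~0x80000000u;               /* of word 0 plus bit 0 of word 1 */
;				            n = 31;
;				        } else {
;				            return 0;                              /* no 2-bit run anywhere */
;				        }
;				    }
;				    return (free[0] | free[1]) ? n : -n;           /* negative means it was the last */
;				}
;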
4965 lwz r4,mbfree(r3) ; Get the first mask
4966 lis r0,0x8000 ; Get the mask to clear the first free bit
4967 lwz r5,mbfree+4(r3) ; Get the second mask
4968 mr r12,r3 ; Save the block ptr
4969 slwi r6,r4,1 ; shift first word over
4970 and r6,r4,r6 ; lite start of double bit runs in 1st word
4971 slwi r7,r5,1 ; shift 2nd word over
4972 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4973 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4974 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4975 cntlzw r10,r7 ; Get first free field in second word
4976 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4977 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4978 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4979 bne mapalc2a ; Found two consecutive free bits in 1st word
4981 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4982 li r3,0 ; assume failure
4983 srwi r11,r9,1 ; get mask for 2nd bit
4984 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4985 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4986 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4987 addi r3,r10,32 ; set the correct number
4990 or. r0,r4,r5 ; any more bits set?
4991 stw r4,mbfree(r12) ; update bitmasks
4992 stw r5,mbfree+4(r12)
4993 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4997 dcbz r6,r12 ; zero out the 128-byte mapping
4998 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4999 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
5002 bnelr++ ; return if another bit remains set
5004 neg r3,r3 ; indicate we just returned the last bit
5008 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5009 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5010 beqlr ; no, we failed
5011 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5012 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5013 li r3,31 ; get index of this field
5018 ; This routine initializes the hash table and PCA.
5019 ; It is done here because we may need to be 64-bit to do it.
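;
;			In C terms, the initialization below amounts to the following sketch
;			(illustrative constants only; the PCA grows downward from the hash table
;			base, one word per PTEG, seeded with the "all slots free" pattern, so the
;			region below htab_base is assumed to exist as it does in the kernel):
;
;				#include <stdint.h>
;				#include <string.h>
;
;				#define PTEG_SIZE 64              /* 32-bit PTEG size; 128 on 64-bit machines */
;				#define PCA_FREE  0xFF010000u     /* all slots free, steal pointer at the end */
;
;				static void hash_init_sketch(uint8_t *htab_base, uint32_t htab_size)
;				{
;				    memset(htab_base, 0, htab_size);          /* the dcbz loops below */
;
;				    uint32_t *pca = (uint32_t *)(void *)htab_base;
;				    for (uint32_t i = 1; i <= htab_size / PTEG_SIZE; i++)
;				        pca[-(int32_t)i] = PCA_FREE;          /* one PCA word per PTEG */
;				}
;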
5023 .globl EXT(hw_hash_init)
5027 mfsprg r10,2 ; Get feature flags
5028 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5029 mtcrf 0x02,r10 ; move pf64Bit to cr6
5030 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5031 lis r4,0xFF01 ; Set all slots free and start steal at end
5032 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5033 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5035 lwz r12,0(r12) ; Get hash table size
5037 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5039 lwz r11,4(r11) ; Get hash table base
5041 hhiNext32: cmplw r3,r12 ; Have we reached the end?
5042 bge- hhiCPCA32 ; Yes...
5043 dcbz r3,r11 ; Clear the line
5044 addi r3,r3,32 ; Next one...
5045 b hhiNext32 ; Go on...
5047 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5048 li r3,-4 ; Displacement to first PCA entry
5049 neg r12,r12 ; Get negative end of PCA
5051 hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5052 subi r3,r3,4 ; Next slot
5053 cmpw r3,r12 ; Have we finished?
5054 bge+ hhiNPCA32 ; Not yet...
5057 hhiSF: mfmsr r9 ; Save the MSR
5059 mr r0,r9 ; Get a copy of the MSR
5060 ld r11,0(r11) ; Get hash table base
5061 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5062 mtmsrd r0 ; Turn on SF
5066 hhiNext64: cmpld r3,r12 ; Have we reached the end?
5067 bge-- hhiCPCA64 ; Yes...
5068 dcbz128 r3,r11 ; Clear the line
5069 addi r3,r3,128 ; Next one...
5070 b hhiNext64 ; Go on...
5072 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5073 li r3,-4 ; Displacement to first PCA entry
5074 neg r12,r12 ; Get negative end of PCA
5076 hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5077 subi r3,r3,4 ; Next slot
5078 cmpd r3,r12 ; Have we finished?
5079 bge++ hhiNPCA64 ; Not yet...
5081 mtmsrd r9 ; Turn off SF if it was off
5087 ; This routine sets up the hardware to start translation.
5088 ; Note that we do NOT start translation.
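;
;			A hedged sketch of the SDR1 images built below (field layouts per the
;			PowerPC architecture books; sizes assumed to be powers of two of at least
;			256KB; this is not the kernel's code):
;
;				#include <stdint.h>
;
;				static uint32_t sdr1_image_32(uint32_t htab_base, uint32_t htab_size)
;				{
;				    return htab_base | (((htab_size - 1) >> 16) & 0x1FF);  /* base | HTABMASK */
;				}
;
;				static uint64_t sdr1_image_64(uint64_t htab_base, uint32_t htab_size)
;				{
;				    uint32_t log2_size = 31 - (uint32_t)__builtin_clz(htab_size);
;				    return htab_base | (log2_size - 18);    /* HTABSIZE; 2^18 = 256KB minimum */
;				}
;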
5092 .globl EXT(hw_setup_trans)
5094 LEXT(hw_setup_trans)
5096 mfsprg r11,0 ; Get the per_proc block
5097 mfsprg r12,2 ; Get feature flags
5100 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5101 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5102 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5103 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5104 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5106 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5108 li r9,0 ; Clear out a register
5111 mtdbatu 0,r9 ; Invalidate maps
5112 mtdbatl 0,r9 ; Invalidate maps
5113 mtdbatu 1,r9 ; Invalidate maps
5114 mtdbatl 1,r9 ; Invalidate maps
5115 mtdbatu 2,r9 ; Invalidate maps
5116 mtdbatl 2,r9 ; Invalidate maps
5117 mtdbatu 3,r9 ; Invalidate maps
5118 mtdbatl 3,r9 ; Invalidate maps
5120 mtibatu 0,r9 ; Invalidate maps
5121 mtibatl 0,r9 ; Invalidate maps
5122 mtibatu 1,r9 ; Invalidate maps
5123 mtibatl 1,r9 ; Invalidate maps
5124 mtibatu 2,r9 ; Invalidate maps
5125 mtibatl 2,r9 ; Invalidate maps
5126 mtibatu 3,r9 ; Invalidate maps
5127 mtibatl 3,r9 ; Invalidate maps
5129 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5130 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5131 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5132 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5133 lwz r11,4(r11) ; Get hash table base
5134 lwz r12,0(r12) ; Get hash table size
5135 subi r12,r12,1 ; Back off by 1
5136 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5138 mtsdr1 r11 ; Ok, we now have the hash table set up
5141 li r12,invalSpace ; Get the invalid segment value
5142 li r10,0 ; Start low
5144 hstsetsr: mtsrin r12,r10 ; Set the SR
5145 addis r10,r10,0x1000 ; Bump the segment
5146 mr. r10,r10 ; Are we finished?
5147 bne+ hstsetsr ; Nope...
5155 hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5156 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5157 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5158 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5159 ld r11,0(r11) ; Get hash table base
5160 lwz r12,0(r12) ; Get hash table size
5161 cntlzw r10,r12 ; Get the number of bits
5162 subfic r10,r10,13 ; Get the extra bits we need
5163 or r11,r11,r10 ; Add the size field to SDR1
5165 mtsdr1 r11 ; Ok, we now have the hash table set up
5168 li r0,0 ; Set an SLB slot index of 0
5169 slbia ; Trash all SLB entries (except for entry 0 that is)
5170 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5171 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5172 slbie r7 ; Invalidate it
5178 ; This routine turns on translation for the first time on a processor
5182 .globl EXT(hw_start_trans)
5184 LEXT(hw_start_trans)
5187 mfmsr r10 ; Get the msr
5188 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5190 mtmsr r10 ; Everything falls apart here
5198 ; This routine validates a segment register.
5199 ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5202 ; r4 = segment[0:31]
5203 ; r5 = segment[32:63]
5207 ; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5208 ; Note that there is no reason to apply the key modifier here because this is only
5209 ; used for kernel accesses.
5213 .globl EXT(hw_map_seg)
5217 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5218 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5219 mfsprg r10,2 ; Get feature flags
5222 ; Note: the following code would probably be easier to follow if I split it,
5223 ; but I just wanted to see if I could write this to work on both 32- and 64-bit
5224 ; machines combined.
5228 ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5229 ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5231 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5232 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5233 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5234 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5235 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5236 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5237 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5238 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5239 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5240 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5242 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5243 ; concatenated together. There is garbage
5244 ; at the top for 64-bit but we will clean it up
5246 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5250 ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5251 ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5255 ; What we have now is:
5258 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5259 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5260 ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5261 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5262 ; 0 0 1 2 3 - for 32-bit machines
5266 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5267 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5268 ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5269 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5270 ; 0 0 1 2 3 - for 32-bit machines
5274 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5275 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5276 ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5277 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5278 ; 0 0 1 2 3 - for 32-bit machines
5282 xor r8,r8,r2 ; Calculate VSID
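;
;			Schematically (the bit positions here are illustrative, not the exact
;			shifts used above), the VSID comes from folding shifted copies of the
;			space ID together and XORing the result with the shifted segment number:
;
;				#include <stdint.h>
;
;				static uint64_t vsid_sketch(uint32_t space_id, uint64_t ea)
;				{
;				    uint64_t space_hash = (uint64_t)space_id          /* overlapping copies of */
;				                        | ((uint64_t)space_id << 14)  /* the space ID, folded  */
;				                        | ((uint64_t)space_id << 28); /* together              */
;				    uint64_t seg = ea >> 28;                          /* "segment" bits of the EA */
;				    return (space_hash ^ seg) & ((1ULL << 52) - 1);   /* keep a VSID-width result */
;				}
;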
5284 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5285 mfsprg r12,0 ; Get the per_proc
5286 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5287 mfmsr r6 ; Get current MSR
5288 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5289 mtmsrd r0,1 ; Set only the EE bit to 0
5290 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5291 mfmsr r11 ; Get the MSR right now, after disabling EE
5292 andc r2,r11,r2 ; Turn off translation now
5293 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5294 or r11,r11,r6 ; Turn on the EE bit if it was on
5295 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5296 isync ; Hang out a bit
5298 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5299 sldi r9,r9,9 ; Position the key and noex bit
5301 rldimi r5,r8,12,0 ; Form the VSID/key
5303 not r3,r6 ; Make valids be 0s
5305 cntlzd r7,r3 ; Find a free SLB
5306 cmplwi r7,63 ; Did we find a free SLB entry?
5308 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5310 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5311 addi r7,r7,1 ; Make sure we skip slb 0
5312 blt++ hmsFreeSeg ; Yes, go load it...
5315 ; No free SLB entries, select one that is in use and invalidate it
5317 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5318 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5319 addi r2,r2,1 ; Set next slot to steal
5320 slbmfee r3,r7 ; Get the entry that is in the selected spot
5321 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5322 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5323 srawi r8,r8,31 ; Get -1 if steal index still in range
5324 slbie r3 ; Invalidate the in-use SLB entry
5325 and r2,r2,r8 ; Reset steal index when it should wrap
5328 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5330 ; We are now ready to stick the SLB entry in the SLB and mark it in use
5333 hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5334 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5335 srd r0,r0,r2 ; Set bit mask for allocation
5336 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5337 or r6,r6,r0 ; Turn on the allocation flag
5339 slbmte r5,r4 ; Make that SLB entry
5341 std r6,validSegs(r12) ; Mark as valid
5342 mtmsrd r11 ; Restore the MSR
5349 mfsprg r12,1 ; Get the current activation
5350 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5351 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5352 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5353 lis r0,0x8000 ; Set bit 0
5354 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5355 srw r0,r0,r2 ; Get bit corresponding to SR
5356 addi r7,r12,validSegs ; Point to the valid segment flags directly
5358 mtsrin r8,r4 ; Set the actual SR
5359 isync ; Need to make sure this is done
5361 hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5362 or r6,r6,r0 ; Show that SR is valid
5363 stwcx. r6,0,r7 ; Set the valid SR flags
5364 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5370 ; This routine invalidates a segment register.
5374 .globl EXT(hw_blow_seg)
5378 mfsprg r10,2 ; Get feature flags
5379 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5381 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5383 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5385 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5386 mfmsr r6 ; Get current MSR
5387 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5388 mtmsrd r0,1 ; Set only the EE bit to 0
5389 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5390 mfmsr r11 ; Get the MSR right now, after disabling EE
5391 andc r2,r11,r2 ; Turn off translation now
5392 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5393 or r11,r11,r6 ; Turn on the EE bit if it was on
5394 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5395 isync ; Hang out a bit
5397 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5399 slbie r9 ; Invalidate the associated SLB entry
5401 mtmsrd r11 ; Restore the MSR
5408 mfsprg r12,1 ; Get the current activation
5409 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5410 addi r7,r12,validSegs ; Point to the valid segment flags directly
5411 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5412 rlwinm r6,r9,4,28,31 ; Convert segment to number
5413 lis r2,0x8000 ; Set up a mask
5414 srw r2,r2,r6 ; Make a mask
5415 and. r0,r4,r2 ; See if this is even valid
5416 li r5,invalSpace ; Set the invalid address space VSID
5417 beqlr ; Leave if already invalid...
5419 mtsrin r5,r9 ; Slam the segment register
5420 isync ; Need to make sure this is done
5422 hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5423 stwcx. r4,0,r7 ; Set the valid SR flags
5424 beqlr++ ; Stored ok, no interrupt, time to leave...
5426 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5427 b hbsrupt ; Try again...
5430 ; This routine invalidates the entire pmap segment cache
5432 ; Translation is on, interrupts may or may not be enabled.
5436 .globl EXT(invalidateSegs)
5438 LEXT(invalidateSegs)
5440 la r10,pmapCCtl(r3) ; Point to the segment cache control
5441 eqv r2,r2,r2 ; Get all foxes
5443 isInv: lwarx r4,0,r10 ; Get the segment cache control value
5444 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5445 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5446 bne-- isInv0 ; Yes, try again...
5448 stwcx. r4,0,r10 ; Try to invalidate it
5449 bne-- isInv ; Someone else just stuffed it...
5453 isInv0: li r4,lgKillResv ; Get reservation kill zone
5454 stwcx. r4,0,r4 ; Kill reservation
5456 isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5457 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5458 bne-- isInv1 ; Yes, still locked, keep checking...
5459 b isInv ; Nope, it is free, go try to grab it again...
5462 ; This routine switches segment registers between kernel and user.
5463 ; We have some assumptions and rules:
5464 ; We are in the exception vectors
5465 ; pf64Bitb is set up
5466 ; R3 contains the MSR we are going to
5467 ; We can not use R4, R13, R20, R21, R29
5468 ; R13 is the savearea
5469 ; R29 has the per_proc
5471 ; We return R3 as 0 if we did not switch between kernel and user
5472 ; We also maintain and apply the user state key modifier used by VMM support;
5473 ; If we go to the kernel it is set to 0, otherwise it follows the bit
5478 .globl EXT(switchSegs)
5482 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5483 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5484 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5485 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5486 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5487 or r2,r2,r3 ; This will be 1 if we will be using user segments
5488 li r3,0 ; Get a selection mask
5489 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5490 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5491 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5492 la r19,ppUserPmap(r29) ; Point to the current user pmap
5494 ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5495 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5497 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5498 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5499 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5500 or r8,r8,r19 ; Get the pointer to the pmap we are using
5502 beqlr ; We are staying in the same mode, do not touch segs...
5504 lwz r28,0(r8) ; Get top half of pmap address
5505 lwz r10,4(r8) ; Get bottom half
5507 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5508 rlwinm r28,r28,0,1,0 ; Copy top to top
5509 stw r30,ppMapFlags(r29) ; Set the key modifier
5510 rlwimi r28,r10,0,0,31 ; Insert bottom
5512 la r10,pmapCCtl(r28) ; Point to the segment cache control
5513 la r9,pmapSegCache(r28) ; Point to the segment cache
5515 ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5516 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5517 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5518 bne-- ssgLock0 ; Yup, this is in use...
5520 stwcx. r16,0,r10 ; Try to set the lock
5521 bne-- ssgLock ; Did we get contention?
5523 not r11,r15 ; Invert the invalids to valids
5524 li r17,0 ; Set a mask for the SRs we are loading
5525 isync ; Make sure we are all caught up
5527 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5530 slbia ; Trash all SLB entries (except for entry 0 that is)
5531 li r17,1 ; Get SLB index to load (skip slb 0)
5532 oris r0,r0,0x8000 ; Get set for a mask
5533 b ssg64Enter ; Start on a cache line...
5537 ssgLock0: li r15,lgKillResv ; Killing field
5538 stwcx. r15,0,r15 ; Kill reservation
5540 ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5541 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5542 beq++ ssgLock ; Nope, it is free now, go try to take it...
5543 b ssgLock1 ; Yup, still in use, keep checking...
5545 ; This is the 32-bit address space switch code.
5546 ; We take a reservation on the segment cache and walk through.
5547 ; For each valid entry, we load the segment register and remember which
5548 ; ones we loaded with a mask. Then, we figure out which segments should be
5549 ; invalid and then see which actually are. Then we load those with the
5550 ; defined invalid VSID.
5551 ; Afterwards, we unlock the segment cache.
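;
;			A C-level model of that procedure (illustration only; mtsrin() is a
;			stand-in for the instruction, 16 cache entries are assumed, and the real
;			code walks the valid mask with cntlzw rather than a plain loop):
;
;				#include <stdint.h>
;
;				static void switch_segs32_sketch(uint32_t cache_valid,          /* live cache slots */
;				                                 const uint32_t esid[16],
;				                                 const uint32_t sr_val[16],
;				                                 uint32_t *valid_srs,           /* validSegs image */
;				                                 uint32_t invalid_vsid,
;				                                 void (*mtsrin)(uint32_t v, uint32_t ea))
;				{
;				    uint32_t loaded = 0;
;				    for (int i = 0; i < 16; i++) {
;				        if (!(cache_valid & (0x80000000u >> i))) continue;      /* empty slot */
;				        int sr = esid[i] >> 28;                                 /* segment number */
;				        mtsrin(sr_val[i], esid[i]);                             /* load the SR */
;				        loaded |= 0x80000000u >> sr;                            /* remember it */
;				    }
;				    uint32_t stale = *valid_srs & ~loaded;       /* valid before, not reloaded now */
;				    for (int sr = 0; sr < 16; sr++)
;				        if (stale & (0x80000000u >> sr))
;				            mtsrin(invalid_vsid, (uint32_t)sr << 28);           /* stamp invalid */
;				    *valid_srs = loaded;
;				}
;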
5556 ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5557 cmplwi r12,pmapSegCacheUse ; See if we are done
5558 slwi r14,r12,4 ; Index to the cache slot
5559 lis r0,0x8000 ; Get set for a mask
5560 add r14,r14,r9 ; Point to the entry
5562 bge- ssg32Done ; All done...
5564 lwz r5,sgcESID+4(r14) ; Get the ESID part
5565 srw r2,r0,r12 ; Form a mask for the one we are loading
5566 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5568 andc r11,r11,r2 ; Clear the bit
5569 lwz r6,sgcVSID(r14) ; And get the VSID top
5571 rlwinm r2,r5,4,28,31 ; Isolate the segment number
5573 xor r7,r7,r30 ; Modify the key before we actually set it
5574 srw r0,r0,r2 ; Get a mask for the SR we are loading
5575 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5576 or r17,r17,r0 ; Remember the segment
5577 rlwimi r8,r7,20,12,31 ; Insert bits 4:23 of the VSID
5578 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5580 mtsrin r8,r5 ; Load the segment
5581 b ssg32Enter ; Go enter the next...
5585 ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5586 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5588 lis r0,0x8000 ; Get set for a mask
5589 li r2,invalSpace ; Set the invalid address space VSID
5593 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5596 ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5597 cmplwi r18,16 ; Have we finished?
5598 srw r22,r0,r18 ; Get the mask bit
5599 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5600 andc r16,r16,r22 ; Get rid of the guy we just did
5601 bge ssg32Really ; Yes, we are really done now...
5603 mtsrin r2,r23 ; Invalidate the SR
5604 b ssg32Inval ; Do the next...
5609 stw r17,validSegs(r29) ; Set the valid SR flags
5610 li r3,1 ; Set kernel/user transition
5614 ; This is the 64-bit address space switch code.
5615 ; First we blow away all of the SLB entries.
5617 ; Then we walk the segment cache, loading the SLB. Afterwards, we release the cache lock.
5619 ; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5620 ; It's a performance thing...
5625 ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5626 cmplwi r12,pmapSegCacheUse ; See if we are done
5627 slwi r14,r12,4 ; Index to the cache slot
5628 srw r16,r0,r12 ; Form a mask for the one we are loading
5629 add r14,r14,r9 ; Point to the entry
5630 andc r11,r11,r16 ; Clear the bit
5631 bge-- ssg64Done ; All done...
5633 ld r5,sgcESID(r14) ; Get the ESID part
5634 ld r6,sgcVSID(r14) ; And get the VSID part
5635 oris r5,r5,0x0800 ; Turn on the valid bit
5636 or r5,r5,r17 ; Insert the SLB slot
5637 xor r6,r6,r30 ; Modify the key before we actually set it
5638 addi r17,r17,1 ; Bump to the next slot
5639 slbmte r6,r5 ; Make that SLB entry
5640 b ssg64Enter ; Go enter the next...
5644 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5646 eqv r16,r16,r16 ; Load up with all foxes
5647 subfic r17,r17,64 ; Get the number of 1 bits we need
5649 sld r16,r16,r17 ; Get a mask for the used SLB entries
5650 li r3,1 ; Set kernel/user transition
5651 std r16,validSegs(r29) ; Set the valid SR flags
5655 ; mapSetUp - this function sets initial state for all mapping functions.
5656 ; We turn off all translations (physical), disable interruptions, and
5657 ; enter 64-bit mode if applicable.
5659 ; We also return the original MSR in r11, the feature flags in R12,
5660 ; and CR6 set up so we can do easy branches for 64-bit
5661 ; hw_clear_maps assumes r10, r9 will not be trashed.
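;
;			The MSR juggling amounts to the following C sketch (the MSR_* masks are
;			stand-ins for the definitions in proc_reg.h; the caller performs the
;			actual mtmsr/mtmsrd and isync):
;
;				#include <stdint.h>
;
;				#define MSR_VEC 0x02000000u
;				#define MSR_FP  0x00002000u
;				#define MSR_EE  0x00008000u
;				#define MSR_IR  0x00000020u
;				#define MSR_DR  0x00000010u
;
;				static uint64_t map_setup_msr(uint64_t cur_msr, int is_64bit, uint64_t *saved)
;				{
;				    *saved = cur_msr & ~(uint64_t)(MSR_VEC | MSR_FP);     /* returned MSR: VEC/FP off */
;				    uint64_t new_msr = *saved & ~(uint64_t)(MSR_EE | MSR_IR | MSR_DR);
;				    if (is_64bit)
;				        new_msr |= 1ULL << 63;                            /* SF: 64-bit mode */
;				    return new_msr;
;				}
;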
5665 .globl EXT(mapSetUp)
5669 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5670 mfsprg r12,2 ; Get feature flags
5671 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5672 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5673 mfmsr r11 ; Save the MSR
5674 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5675 andc r11,r11,r0 ; Clear VEC and FP for good
5676 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5677 li r2,1 ; Prepare for 64 bit
5678 andc r0,r11,r0 ; Clear the rest
5679 bt pfNoMSRirb,msuNoMSR ; No MSR...
5680 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5682 mtmsr r0 ; Translation and all off
5683 isync ; Toss prefetch
5688 msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5689 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5695 msuNoMSR: mr r2,r3 ; Save R3 across call
5696 mr r3,r0 ; Get the new MSR value
5697 li r0,loadMSR ; Get the MSR setter SC
5699 mr r3,r2 ; Restore R3
5700 blr ; Go back all set up...
5704 ; Guest shadow assist -- remove all guest mappings
5706 ; Remove all mappings for a guest pmap from the shadow hash table.
5709 ; r3 : address of pmap, 32-bit kernel virtual address
5711 ; Non-volatile register usage:
5712 ; r24 : host pmap's physical address
5713 ; r25 : VMM extension block's physical address
5714 ; r26 : physent address
5715 ; r27 : guest pmap's space ID number
5716 ; r28 : current hash table page index
5717 ; r29 : guest pmap's physical address
5718 ; r30 : saved msr image
5719 ; r31 : current mapping
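;
;			The overall shape of the routine, as a hedged C sketch over toy types (the
;			real slot layout, page counts, PTE and physent handling live elsewhere in
;			the kernel):
;
;				#include <stdint.h>
;
;				enum { GRA_FREE = 1, GRA_DORMANT = 2 };            /* illustrative flag bits */
;
;				typedef struct { uint32_t flags; uint16_t space; } gra_slot_t;
;
;				static void rem_all_guest_sketch(gra_slot_t *pages[], int n_pages,
;				                                 int slots_per_page, uint16_t guest_space)
;				{
;				    /* caller holds the host pmap's search lock exclusively */
;				    for (int p = 0; p < n_pages; p++) {
;				        for (int s = 0; s < slots_per_page; s++) {
;				            gra_slot_t *slot = &pages[p][s];
;				            if ((slot->flags & GRA_FREE) || slot->space != guest_space)
;				                continue;                          /* not one of ours */
;				            if (!(slot->flags & GRA_DORMANT)) {
;				                /* active: the real code disconnects and frees the PTE here */
;				            }
;				            /* ...then unchains the mapping from its physent... */
;				            slot->flags = GRA_FREE;                /* and marks the slot free */
;				        }
;				    }
;				}
;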
5722 .globl EXT(hw_rem_all_gv)
5726 #define graStackSize ((31-24+1)*4)+4
5727 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5728 ; Mint a new stack frame
5729 mflr r0 ; Get caller's return address
5730 mfsprg r11,2 ; Get feature flags
5731 mtcrf 0x02,r11 ; Insert feature flags into cr6
5732 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5733 ; Save caller's return address
5734 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5735 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5736 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5737 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5738 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5739 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5740 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5741 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5743 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5745 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5746 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5747 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5748 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5749 b graStart ; Get to it
5750 gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5751 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5752 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5753 graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5754 xor r29,r3,r9 ; Convert pmap_t virt->real
5755 mr r30,r11 ; Save caller's msr image
5757 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5758 bl sxlkExclusive ; Get lock exclusive
5760 lwz r3,vxsGra(r25) ; Get remove all count
5761 addi r3,r3,1 ; Increment remove all count
5762 stw r3,vxsGra(r25) ; Update remove all count
5764 li r28,0 ; r28 <- first hash page table index to search
5765 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5767 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5768 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5769 ; Convert page index into page physical index offset
5770 add r31,r31,r11 ; Calculate page physical index entry address
5771 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5772 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5773 b graLoop ; Examine all slots in this page
5774 gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5775 b graLoop ; Examine all slots in this page
5778 graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5779 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5780 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5781 xor r4,r4,r27 ; Compare space ID number
5782 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5783 bne graMiss ; Not one of ours, skip it
5785 lwz r11,vxsGraHits(r25) ; Get remove hit count
5786 addi r11,r11,1 ; Increment remove hit count
5787 stw r11,vxsGraHits(r25) ; Update remove hit count
5789 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5790 bne graRemPhys ; Yes, nothing to disconnect
5792 lwz r11,vxsGraActive(r25) ; Get remove active count
5793 addi r11,r11,1 ; Increment remove active count
5794 stw r11,vxsGraActive(r25) ; Update remove active count
5796 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5797 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5798 ; r31 <- mapping's physical address
5799 ; r3 -> PTE slot physical address
5800 ; r4 -> High-order 32 bits of PTE
5801 ; r5 -> Low-order 32 bits of PTE
5803 ; r7 -> PCA physical address
5804 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5805 b graFreePTE ; Join 64-bit path to release the PTE
5806 graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5807 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5808 graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5809 beq- graRemPhys ; No valid PTE, we're almost done
5810 lis r0,0x8000 ; Prepare free bit for this slot
5811 srw r0,r0,r2 ; Position free bit
5812 or r6,r6,r0 ; Set it in our PCA image
5813 lwz r8,mpPte(r31) ; Get PTE pointer
5814 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5815 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5816 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5817 stw r6,0(r7) ; Update PCA and unlock the PTEG
5820 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5821 bl mapFindLockPN ; Find 'n' lock this page's physent
5822 mr. r26,r3 ; Got lock on our physent?
5823 beq-- graBadPLock ; No, time to bail out
5825 crset cr1_eq ; cr1_eq <- previous link is the anchor
5826 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5827 la r11,ppLink+4(r26) ; Point to chain anchor
5828 lwz r9,ppLink+4(r26) ; Get chain anchor
5829 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5831 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5832 cmplw r9,r31 ; Is this the mapping to remove?
5833 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5834 bne graRemNext ; No, chain onward
5835 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5836 stw r8,0(r11) ; Unchain gpv->phys mapping
5837 b graRemoved ; Exit loop
5839 lwarx r0,0,r11 ; Get previous link
5840 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5841 stwcx. r0,0,r11 ; Update previous link
5842 bne- graRemRetry ; Lost reservation, retry
5843 b graRemoved ; Good work, let's get outta here
5845 graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5846 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5847 mr. r9,r8 ; Does next entry exist?
5848 b graRemLoop ; Carry on
5851 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5852 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5853 la r11,ppLink(r26) ; Point to chain anchor
5854 ld r9,ppLink(r26) ; Get chain anchor
5855 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5856 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5857 cmpld r9,r31 ; Is this the mapping to remove?
5858 ld r8,mpAlias(r9) ; Get forward chain pointer
5859 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5860 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5861 std r8,0(r11) ; Unchain gpv->phys mapping
5862 b graRemoved ; Exit loop
5863 graRem64Rt: ldarx r0,0,r11 ; Get previous link
5864 and r0,r0,r7 ; Get flags
5865 or r0,r0,r8 ; Insert new forward pointer
5866 stdcx. r0,0,r11 ; Slam it back in
5867 bne-- graRem64Rt ; Lost reservation, retry
5868 b graRemoved ; Good work, let's go home
5871 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5872 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5873 mr. r9,r8 ; Does next entry exist?
5874 b graRem64Lp ; Carry on
5877 mr r3,r26 ; r3 <- physent's address
5878 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5880 lwz r3,mpFlags(r31) ; Get mapping's flags
5881 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5882 ori r3,r3,mpgFree ; Mark mapping free
5883 stw r3,mpFlags(r31) ; Update flags
5885 graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5886 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5887 bne graLoop ; No, examine next slot
5888 addi r28,r28,1 ; Increment hash table page index
5889 cmplwi r28,GV_HPAGES ; End of hash table?
5890 bne graPgLoop ; Examine next hash table page
5892 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5893 bl sxlkUnlock ; Release host pmap's search lock
5895 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5896 mtmsr r30 ; Restore 'rupts, translation
5897 isync ; Throw a small wrench into the pipeline
5898 b graPopFrame ; Nothing to do now but pop a frame and return
5899 graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5901 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5902 ; Get caller's return address
5903 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5904 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5905 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5906 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5907 mtlr r0 ; Prepare return address
5908 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5909 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5910 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5911 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5912 lwz r1,0(r1) ; Pop stack frame
5913 blr ; Return to caller
5917 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5918 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5919 li r3,failMapping ; The BOMB, Dmitri.
5920 sc ; The hydrogen bomb.
5924 ; Guest shadow assist -- remove local guest mappings
5926 ; Remove local mappings for a guest pmap from the shadow hash table.
5929 ; r3 : address of guest pmap, 32-bit kernel virtual address
5931 ; Non-volatile register usage:
5932 ; r20 : current active map word's physical address
5933 ; r21 : current hash table page address
5934 ; r22 : updated active map word in process
5935 ; r23 : active map word in process
5936 ; r24 : host pmap's physical address
5937 ; r25 : VMM extension block's physical address
5938 ; r26 : physent address
5939 ; r27 : guest pmap's space ID number
5940 ; r28 : current active map index
5941 ; r29 : guest pmap's physical address
5942 ; r30 : saved msr image
5943 ; r31 : current mapping
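;
;			The heart of this routine is the active-map scan; a hedged C model (toy
;			types, visit() is hypothetical) of how lit bits are picked off each map
;			word with a count-leading-zeros loop:
;
;				#include <stdint.h>
;
;				static int clz32(uint32_t x) { return x ? __builtin_clz(x) : 32; }
;
;				static void scan_active_maps_sketch(uint32_t *act_map, int n_words,
;				                                    void (*visit)(int word, int bit))
;				{
;				    for (int w = 0; w < n_words; w++) {
;				        uint32_t live = act_map[w];
;				        while (live) {
;				            int bit = clz32(live);             /* next lit bit, MSB first */
;				            live &= ~(0x80000000u >> bit);     /* clear it in the working copy */
;				            visit(w, bit);                     /* word selects the band, bit the slot */
;				        }
;				        /* the real code stores the updated word back after clearing the
;				         * bits for mappings it has just marked dormant */
;				    }
;				}
;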
5946 .globl EXT(hw_rem_local_gv)
5948 LEXT(hw_rem_local_gv)
5950 #define grlStackSize ((31-20+1)*4)+4
5951 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5952 ; Mint a new stack frame
5953 mflr r0 ; Get caller's return address
5954 mfsprg r11,2 ; Get feature flags
5955 mtcrf 0x02,r11 ; Insert feature flags into cr6
5956 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5957 ; Save caller's return address
5958 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5959 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5960 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5961 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5962 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5963 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5964 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5965 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5966 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5967 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5968 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5969 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5971 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5973 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5974 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5975 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5976 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5977 b grlStart ; Get to it
5978 grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5979 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5980 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5982 grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5983 xor r29,r3,r9 ; Convert pmap_t virt->real
5984 mr r30,r11 ; Save caller's msr image
5986 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5987 bl sxlkExclusive ; Get lock exclusive
5989 li r28,0 ; r28 <- index of first active map word to search
5990 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5991 b grlMap1st ; Examine first map word
5994 grlNextMap: stw r22,0(r20) ; Save updated map word back to the active map
5995 addi r28,r28,1 ; Increment map word index
5996 cmplwi r28,GV_MAP_WORDS ; See if we're done
5997 beq grlDone ; Yup, let's get outta here
5999 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
6000 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
6001 ; Convert map index into map index offset
6002 add r20,r20,r11 ; Calculate map array element address
6003 lwz r22,0(r20) ; Get active map word at index
6004 mr. r23,r22 ; Any active mappings indicated?
6005 beq grlNextMap ; Nope, check next word
6007 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6008 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6009 ; Extract page index from map word index and convert
6010 ; into page physical index offset
6011 add r21,r21,r11 ; Calculate page physical index entry address
6012 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6013 lwz r21,4(r21) ; Get selected hash table page's address
6014 b grlLoop ; Examine all slots in this page
6015 grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6016 b grlLoop ; Examine all slots in this page
6019 grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6020 cmplwi r11,32 ; Any active mappings left in this word?
6021 lis r12,0x8000 ; Prepare mask to reset bit
6022 srw r12,r12,r11 ; Position mask bit
6023 andc r23,r23,r12 ; Reset lit bit
6024 beq grlNextMap ; No bits lit, examine next map word
6026 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6027 rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6028 ; Extract slot band number from index and insert
6029 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6031 lwz r3,mpFlags(r31) ; Get mapping's flags
6032 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6033 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6034 xor r4,r4,r27 ; Compare space ID number
6035 or. r4,r4,r5 ; (space id miss || global)
6036 bne grlLoop ; Not one of ours, skip it
6037 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6038 ori r3,r3,mpgDormant ; Mark entry dormant
6039 stw r3,mpFlags(r31) ; Update mapping's flags
6041 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6042 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6043 ; r31 <- mapping's physical address
6044 ; r3 -> PTE slot physical address
6045 ; r4 -> High-order 32 bits of PTE
6046 ; r5 -> Low-order 32 bits of PTE
6048 ; r7 -> PCA physical address
6049 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6050 b grlFreePTE ; Join 64-bit path to release the PTE
6051 grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6052 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6053 grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6054 beq- grlLoop ; No valid PTE, we're done with this mapping
6055 lis r0,0x8000 ; Prepare free bit for this slot
6056 srw r0,r0,r2 ; Position free bit
6057 or r6,r6,r0 ; Set it in our PCA image
6058 lwz r8,mpPte(r31) ; Get PTE pointer
6059 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6060 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6061 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6062 stw r6,0(r7) ; Update PCA and unlock the PTEG
6063 b grlLoop ; On to next active mapping in this map word
6065 grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6066 bl sxlkUnlock ; Release host pmap's search lock
6068 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6069 mtmsr r30 ; Restore 'rupts, translation
6070 isync ; Throw a small wrench into the pipeline
6071 b grlPopFrame ; Nothing to do now but pop a frame and return
6072 grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6074 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6075 ; Get caller's return address
6076 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6077 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6078 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6079 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6080 mtlr r0 ; Prepare return address
6081 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6082 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6083 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6084 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6085 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6086 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6087 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6088 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6089 lwz r1,0(r1) ; Pop stack frame
6090 blr ; Return to caller
6094 ; Guest shadow assist -- resume a guest mapping
6096 ; Locates the specified dormant mapping, and if it exists validates it and makes it active again.
6100 ; r3 : address of host pmap, 32-bit kernel virtual address
6101 ; r4 : address of guest pmap, 32-bit kernel virtual address
6102 ; r5 : host virtual address, high-order 32 bits
6103 ; r6 : host virtual address, low-order 32 bits
6104 ; r7 : guest virtual address, high-order 32 bits
6105 ; r8 : guest virtual address, low-order 32 bits
6106 ; r9 : guest mapping protection code
6108 ; Non-volatile register usage:
6109 ; r23 : VMM extension block's physical address
6110 ; r24 : physent physical address
6111 ; r25 : caller's msr image from mapSetUp
6112 ; r26 : guest mapping protection code
6113 ; r27 : host pmap physical address
6114 ; r28 : guest pmap physical address
6115 ; r29 : host virtual address
6116 ; r30 : guest virtual address
6117 ; r31 : gva->phys mapping's physical address
6120 .globl EXT(hw_res_map_gv)
6124 #define grsStackSize ((31-23+1)*4)+4
6126 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6127 ; Mint a new stack frame
6128 mflr r0 ; Get caller's return address
6129 mfsprg r11,2 ; Get feature flags
6130 mtcrf 0x02,r11 ; Insert feature flags into cr6
6131 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6132 ; Save caller's return address
6133 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6134 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6135 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6136 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6137 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6138 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6139 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6140 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6141 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6143 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6144 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6145 mr r26,r9 ; Copy guest mapping protection code
6147 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6148 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6149 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6150 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6151 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6152 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6153 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6154 srwi r11,r30,12 ; Form shadow hash:
6155 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6156 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6157 ; Form index offset from hash page number
6158 add r31,r31,r10 ; r31 <- hash page index entry
6159 lwz r31,4(r31) ; r31 <- hash page paddr
6160 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6161 ; r31 <- hash group paddr
6162 b grsStart ; Get to it
6164 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6165 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6166 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6167 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6168 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6169 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6170 srwi r11,r30,12 ; Form shadow hash:
6171 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6172 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6173 ; Form index offset from hash page number
6174 add r31,r31,r10 ; r31 <- hash page index entry
6175 ld r31,0(r31) ; r31 <- hash page paddr
6176 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6177 ; r31 <- hash group paddr
6179 grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6180 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6181 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6182 mr r25,r11 ; Save caller's msr image
6184 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6185 bl sxlkExclusive ; Get lock exclusive
6187 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6188 mtctr r0 ; in this group
6189 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6191 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6192 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6193 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6194 b grs32SrchLp ; Let the search begin!
6198 mr r6,r3 ; r6 <- current mapping slot's flags
6199 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6200 mr r7,r4 ; r7 <- current mapping slot's space ID
6201 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6202 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6203 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6204 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6205 xor r7,r7,r9 ; Compare space ID
6206 or r0,r11,r7 ; r0 <- !(!free && space match)
6207 xor r8,r8,r30 ; Compare virtual address
6208 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6209 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6211 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6212 bdnz grs32SrchLp ; Iterate
6214 mr r6,r3 ; r6 <- current mapping slot's flags
6215 clrrwi r5,r5,12 ; Remove flags from virtual address
6216 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6217 xor r4,r4,r9 ; Compare space ID
6218 or r0,r11,r4 ; r0 <- !(!free && space match)
6219 xor r5,r5,r30 ; Compare virtual address
6220 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6221 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6222 b grsSrchMiss ; No joy in our hash group
6225 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6226 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6227 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6228 b grs64SrchLp ; Let the search begin!
6232 mr r6,r3 ; r6 <- current mapping slot's flags
6233 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6234 mr r7,r4 ; r7 <- current mapping slot's space ID
6235 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6236 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6237 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6238 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6239 xor r7,r7,r9 ; Compare space ID
6240 or r0,r11,r7 ; r0 <- !(!free && space match)
6241 xor r8,r8,r30 ; Compare virtual address
6242 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6243 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6245 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6246 bdnz grs64SrchLp ; Iterate
6248 mr r6,r3 ; r6 <- current mapping slot's flags
6249 clrrdi r5,r5,12 ; Remove flags from virtual address
6250 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6251 xor r4,r4,r9 ; Compare space ID
6252 or r0,r11,r4 ; r0 <- !(!free && space match)
6253 xor r5,r5,r30 ; Compare virtual address
6254 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6255 bne grsSrchMiss ; No joy in our hash group
6258 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6259 bne grsFindHost ; Yes, nothing to disconnect
6261 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6262 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6263 ; r31 <- mapping's physical address
6264 ; r3 -> PTE slot physical address
6265 ; r4 -> High-order 32 bits of PTE
6266 ; r5 -> Low-order 32 bits of PTE
6268 ; r7 -> PCA physical address
6269 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6270 b grsFreePTE ; Join 64-bit path to release the PTE
6271 grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6272 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6273 grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6274 beq- grsFindHost ; No valid PTE, we're almost done
6275 lis r0,0x8000 ; Prepare free bit for this slot
6276 srw r0,r0,r2 ; Position free bit
6277 or r6,r6,r0 ; Set it in our PCA image
6278 lwz r8,mpPte(r31) ; Get PTE pointer
6279 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6280 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6281 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6282 stw r6,0(r7) ; Update PCA and unlock the PTEG
6286 // We now have a dormant guest mapping that matches our space id and virtual address. Our next
6287 // step is to locate the host mapping that completes the guest mapping's connection to a physical
6288 // frame. The guest and host mappings must connect to the same physical frame, so they must both
6289 // be chained on the same physent. We search the physent chain for a host mapping matching our
6290 // host's space id and the host virtual address. If we succeed, we know that the entire chain
6291 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6292 // resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6293 // host virtual or physical address has changed since the guest mapping was suspended, so it
6294 // is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
6295 // our caller that it will have to take its long path, translating the host virtual address
6296 // through the host's skiplist and installing a new guest mapping.
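;
; A hedged C-level sketch of the decision described above (a sketch only -- the types and
; helpers named here are illustrative, not the kernel's; mapRtOK and mapRtNotFnd are the
; result codes used by the assembly below):
;
;     int resume_guest_mapping(mapping_t *gmp, physent_t *pe,
;                              uint32_t host_space, uint64_t host_va)
;     {
;         for (mapping_t *mp = physent_first(pe); mp != NULL; mp = physent_next(mp)) {
;             if (mp->type == MP_NORMAL && mp->space == host_space &&
;                 (mp->vaddr & ~0xFFFull) == (host_va & ~0xFFFull)) {
;                 gmp->flags &= ~MPG_DORMANT;   /* hva->phys still intact: resume it   */
;                 return mapRtOK;
;             }
;         }
;         free_guest_slot(gmp);                 /* stale: delete it, caller must redrive */
;         return mapRtNotFnd;
;     }
;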
6298 grsFindHost: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6299 bl mapFindLockPN ; Find 'n' lock this page's physent
6300 mr. r24,r3 ; Got lock on our physent?
6301 beq-- grsBadPLock ; No, time to bail out
6303 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6305 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6306 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6307 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6308 grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6309 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6310 lwz r7,mpFlags(r12) ; Get mapping's flags
6311 lhz r4,mpSpace(r12) ; Get mapping's space id number
6312 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6313 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6315 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6316 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6317 xori r0,r0,mpNormal ; Normal mapping?
6318 xor r4,r4,r6 ; Compare w/ host space id number
6319 xor r5,r5,r29 ; Compare w/ host virtual address
6320 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6321 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6322 beq grsPEHit ; Join common path on hit (r12 points to host mapping)
6323 b grsPELoop ; Iterate
6325 grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6326 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6327 ld r9,ppLink(r24) ; Get first mapping on physent
6328 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6329 andc r9,r9,r0 ; Cleanup mapping pointer
6330 grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6331 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6332 lwz r7,mpFlags(r12) ; Get mapping's flags
6333 lhz r4,mpSpace(r12) ; Get mapping's space id number
6334 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6335 ld r9,mpAlias(r12) ; Next mapping physent alias chain
6336 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6337 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6338 xori r0,r0,mpNormal ; Normal mapping?
6339 xor r4,r4,r6 ; Compare w/ host space id number
6340 xor r5,r5,r29 ; Compare w/ host virtual address
6341 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6342 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6343 beq grsPEHit ; Join common path on hit (r12 points to host mapping)
6344 b grsPELp64 ; Iterate
6346 grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6347 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6348 stw r0,mpVAddr+4(r31) ; Write 'em back
6350 eieio ; Ensure previous mapping updates are visible
6351 lwz r0,mpFlags(r31) ; Get flags
6352 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6353 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6355 li r31,mapRtOK ; Indicate success
6356 b grsRelPhy ; Exit through physent lock release
6358 grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6359 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6360 la r11,ppLink+4(r24) ; Point to chain anchor
6361 lwz r9,ppLink+4(r24) ; Get chain anchor
6362 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6363 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6364 cmplw r9,r31 ; Is this the mapping to remove?
6365 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6366 bne grsRemNext ; No, chain onward
6367 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6368 stw r8,0(r11) ; Unchain gpv->phys mapping
6369 b grsDelete ; Finish deleting mapping
6371 grsRemRetry: lwarx r0,0,r11 ; Get previous link
6372 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6373 stwcx. r0,0,r11 ; Update previous link
6374 bne- grsRemRetry ; Lost reservation, retry
6375 b grsDelete ; Finish deleting mapping
6378 grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6379 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6380 mr. r9,r8 ; Does next entry exist?
6381 b grsRemLoop ; Carry on
6384 grsRemove64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6385 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6386 la r11,ppLink(r24) ; Point to chain anchor
6387 ld r9,ppLink(r24) ; Get chain anchor
6388 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6389 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6390 cmpld r9,r31 ; Is this the mapping to remove?
6391 ld r8,mpAlias(r9) ; Get forward chain pointer
6392 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6393 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6394 std r8,0(r11) ; Unchain gpv->phys mapping
6395 b grsDelete ; Finish deleting mapping
6396 grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6397 and r0,r0,r7 ; Get flags
6398 or r0,r0,r8 ; Insert new forward pointer
6399 stdcx. r0,0,r11 ; Slam it back in
6400 bne-- grsRem64Rt ; Lost reservation, retry
6401 b grsDelete ; Finish deleting mapping
6405 grsRem64Nxt: la r11,mpAlias(r9) ; Point to (soon to be) previous link
6406 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6407 mr. r9,r8 ; Does next entry exist?
6408 b grsRem64Lp ; Carry on
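;
; The removal code above (32-bit and 64-bit flavors) is ordinary singly linked list unlinking
; with one twist: the chain anchor word in the physent also carries flag bits, so when the
; victim hangs directly off the anchor the update is done under a reservation (lwarx/stwcx.
; or ldarx/stdcx.) that preserves those flags. A hedged C sketch (names are illustrative,
; not the kernel's):
;
;     void physent_unlink(physent_t *pe, mapping_t *victim)
;     {
;         mapping_t *prev = NULL;
;         for (mapping_t *mp = physent_anchor(pe); mp != NULL; mp = mp->alias) {
;             if (mp == victim) {
;                 if (prev == NULL)                              /* anchored: atomic, keep flags */
;                     physent_set_anchor_keep_flags(pe, victim->alias);
;                 else                                           /* mid-chain: plain store       */
;                     prev->alias = victim->alias;
;                 return;
;             }
;             prev = mp;
;         }
;     }
;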
6411 grsDelete: lwz r3,mpFlags(r31) ; Get mapping's flags
6412 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6413 ori r3,r3,mpgFree ; Mark mapping free
6414 stw r3,mpFlags(r31) ; Update flags
6416 li r31,mapRtNotFnd ; Didn't succeed
6418 grsRelPhy: mr r3,r24 ; r3 <- physent addr
6419 bl mapPhysUnlock ; Unlock physent chain
6421 grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6422 bl sxlkUnlock ; Release host pmap search lock
6424 grsRtn: mr r3,r31 ; r3 <- result code
6425 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6426 mtmsr r25 ; Restore 'rupts, translation
6427 isync ; Throw a small wrench into the pipeline
6428 b grsPopFrame ; Nothing to do now but pop a frame and return
6429 grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6431 grsPopFrame: lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6432 ; Get caller's return address
6433 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6434 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6435 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6436 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6437 mtlr r0 ; Prepare return address
6438 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6439 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6440 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6441 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6442 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6443 lwz r1,0(r1) ; Pop stack frame
6444 blr ; Return to caller
6448 grsSrchMiss: li r31,mapRtNotFnd ; Could not locate requested mapping
6449 b grsRelPmap ; Exit through host pmap search lock release
6451 grsBadPLock:
6452 grsPEMissMiss:
6453 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6454 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6455 li r3,failMapping ; The BOMB, Dmitri.
6456 sc ; The hydrogen bomb.
6460 ; Guest shadow assist -- add a guest mapping
6462 ; Adds a guest mapping.
6465 ; r3 : address of host pmap, 32-bit kernel virtual address
6466 ; r4 : address of guest pmap, 32-bit kernel virtual address
6467 ; r5 : guest virtual address, high-order 32 bits
6468 ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6469 ; r7 : new mapping's flags
6470 ; r8 : physical address, 32-bit page number
6472 ; Non-volatile register usage:
6473 ; r22 : hash group's physical address
6474 ; r23 : VMM extension block's physical address
6475 ; r24 : mapping's flags
6476 ; r25 : caller's msr image from mapSetUp
6477 ; r26 : physent physical address
6478 ; r27 : host pmap physical address
6479 ; r28 : guest pmap physical address
6480 ; r29 : physical address, 32-bit 4k-page number
6481 ; r30 : guest virtual address
6482 ; r31 : gva->phys mapping's physical address
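;
; For orientation, a hedged sketch of how the hash group searched below is derived from these
; arguments (the GV_* constant names are the source's; the C rendering and the rotl32 and
; load_hpidx helpers are illustrative only):
;
;     static uint64_t gv_hash_group(uint64_t hpidx_base, uint32_t guest_space, uint64_t gva)
;     {
;         uint32_t hash = guest_space ^ (uint32_t)(gva >> 12);           /* shadow hash          */
;         uint32_t off  = rotl32(hash, GV_HPAGE_SHIFT) & GV_HPAGE_MASK;  /* index-entry offset   */
;         uint64_t page = load_hpidx(hpidx_base + off);                  /* hash page paddr      */
;         return page | (rotl32(hash, GV_HGRP_SHIFT) & GV_HGRP_MASK);    /* hash group paddr     */
;     }
;
; Each group holds GV_SLOTS mapping slots; the add path below searches them for an existing
; entry before picking a free slot, a dormant slot, or a victim at the group's cursor.
;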
6486 .globl EXT(hw_add_map_gv)
6488 LEXT(hw_add_map_gv)
6491 #define gadStackSize ((31-22+1)*4)+4
6493 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6494 ; Mint a new stack frame
6495 mflr r0 ; Get caller's return address
6496 mfsprg r11,2 ; Get feature flags
6497 mtcrf 0x02,r11 ; Insert feature flags into cr6
6498 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6499 ; Save caller's return address
6500 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6501 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6502 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6503 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6504 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6505 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6506 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6507 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6508 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6509 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6511 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6512 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6513 mr r24,r7 ; Copy guest mapping's flags
6514 mr r29,r8 ; Copy target frame's physical address
6516 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6517 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6518 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6519 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6520 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6521 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6522 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6523 srwi r11,r30,12 ; Form shadow hash:
6524 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6525 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6526 ; Form index offset from hash page number
6527 add r22,r22,r10 ; r22 <- hash page index entry
6528 lwz r22,4(r22) ; r22 <- hash page paddr
6529 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6530 ; r22 <- hash group paddr
6531 b gadStart ; Get to it
6533 gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6534 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6535 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6536 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6537 srwi r11,r30,12 ; Form shadow hash:
6538 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6539 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6540 ; Form index offset from hash page number
6541 add r22,r22,r10 ; r22 <- hash page index entry
6542 ld r22,0(r22) ; r22 <- hash page paddr
6543 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6544 ; r22 <- hash group paddr
6546 gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6547 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6548 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6549 mr r25,r11 ; Save caller's msr image
6551 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6552 bl sxlkExclusive ; Get lock exclusive
6554 mr r31,r22 ; Prepare to search this group
6555 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6556 mtctr r0 ; in this group
6557 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6559 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6560 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6561 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6562 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6563 b gad32SrchLp ; Let the search begin!
6567 gad32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6568 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6569 mr r7,r4 ; r7 <- current mapping slot's space ID
6570 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6571 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6572 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6573 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6574 xor r7,r7,r9 ; Compare space ID
6575 or r0,r11,r7 ; r0 <- !(!free && space match)
6576 xor r8,r8,r12 ; Compare virtual address
6577 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6578 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6580 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6581 bdnz gad32SrchLp ; Iterate
6583 mr r6,r3 ; r6 <- current mapping slot's flags
6584 clrrwi r5,r5,12 ; Remove flags from virtual address
6585 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6586 xor r4,r4,r9 ; Compare space ID
6587 or r0,r11,r4 ; r0 <- !(!free && space match)
6588 xor r5,r5,r12 ; Compare virtual address
6589 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6590 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6591 b gadScan ; No joy in our hash group
6594 gad64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6595 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6596 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6597 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6598 b gad64SrchLp ; Let the search begin!
6602 gad64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6603 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6604 mr r7,r4 ; r7 <- current mapping slot's space ID
6605 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6606 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6607 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6608 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6609 xor r7,r7,r9 ; Compare space ID
6610 or r0,r11,r7 ; r0 <- !(!free && space match)
6611 xor r8,r8,r12 ; Compare virtual address
6612 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6613 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6615 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6616 bdnz gad64SrchLp ; Iterate
6618 mr r6,r3 ; r6 <- current mapping slot's flags
6619 clrrdi r5,r5,12 ; Remove flags from virtual address
6620 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6621 xor r4,r4,r9 ; Compare space ID
6622 or r0,r11,r4 ; r0 <- !(!free && space match)
6623 xor r5,r5,r12 ; Compare virtual address
6624 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6625 bne gadScan ; No joy in our hash group
6626 b gadRelPmap ; Hit, let upper-level redrive sort it out
6628 gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6629 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6630 ; Prepare to address slot at cursor
6631 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6632 mtctr r0 ; in this group
6633 or r2,r22,r12 ; r2 <- 1st mapping to search
6634 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6635 li r11,0 ; No dormant entries found yet
6636 b gadScanLoop ; Let the search begin!
6640 gadScanLoop: addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6641 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6642 ; Trim off any carry, wrapping into slot number range
6643 mr r31,r2 ; r31 <- current mapping's address
6644 or r2,r22,r12 ; r2 <- next mapping to search
6645 mr r6,r3 ; r6 <- current mapping slot's flags
6646 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6647 rlwinm. r0,r6,0,mpgFree ; Test free flag
6648 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6649 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6650 xori r0,r0,mpgDormant ; Invert dormant flag
6651 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6652 bne gadNotDorm ; Not dormant or we've already seen one
6653 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6654 gadNotDorm: bdnz gadScanLoop ; Iterate
6656 mr r31,r2 ; r31 <- final mapping's address
6657 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6658 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6659 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6660 xori r0,r0,mpgDormant ; Invert dormant flag
6661 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6662 bne gadCkDormant ; Not dormant or we've already seen one
6663 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6666 gadCkDormant: mr. r31,r11 ; Get dormant mapping, if any, and test
6667 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6670 lbz r12,mpgCursor(r22) ; Get group's cursor
6671 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6672 ; Prepare to address slot at cursor
6673 or r31,r22,r12 ; r31 <- address of mapping to steal
6675 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6676 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6677 ; r31 <- mapping's physical address
6678 ; r3 -> PTE slot physical address
6679 ; r4 -> High-order 32 bits of PTE
6680 ; r5 -> Low-order 32 bits of PTE
6682 ; r7 -> PCA physical address
6683 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6684 b gadFreePTE ; Join 64-bit path to release the PTE
6685 gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6686 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6687 gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6688 beq- gadUpCursor ; No valid PTE, we're almost done
6689 lis r0,0x8000 ; Prepare free bit for this slot
6690 srw r0,r0,r2 ; Position free bit
6691 or r6,r6,r0 ; Set it in our PCA image
6692 lwz r8,mpPte(r31) ; Get PTE pointer
6693 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6694 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6695 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6696 stw r6,0(r7) ; Update PCA and unlock the PTEG
6699 gadUpCursor: rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6700 ; Recover slot number from stolen mapping's address
6701 addi r12,r12,1 ; Increment slot number
6702 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6703 stb r12,mpgCursor(r22) ; Update group's cursor
6705 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6706 bl mapFindLockPN ; Find 'n' lock this page's physent
6707 mr. r26,r3 ; Got lock on our physent?
6708 beq-- gadBadPLock ; No, time to bail out
6710 crset cr1_eq ; cr1_eq <- previous link is the anchor
6711 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6712 la r11,ppLink+4(r26) ; Point to chain anchor
6713 lwz r9,ppLink+4(r26) ; Get chain anchor
6714 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6715 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6716 cmplw r9,r31 ; Is this the mapping to remove?
6717 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6718 bne gadRemNext ; No, chain onward
6719 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6720 stw r8,0(r11) ; Unchain gpv->phys mapping
6721 b gadDelDone ; Finish deleting mapping
6723 gadRemRetry: lwarx r0,0,r11 ; Get previous link
6724 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6725 stwcx. r0,0,r11 ; Update previous link
6726 bne- gadRemRetry ; Lost reservation, retry
6727 b gadDelDone ; Finish deleting mapping
6729 gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6730 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6731 mr. r9,r8 ; Does next entry exist?
6732 b gadRemLoop ; Carry on
6735 gadRemove64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6736 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6737 la r11,ppLink(r26) ; Point to chain anchor
6738 ld r9,ppLink(r26) ; Get chain anchor
6739 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6740 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6741 cmpld r9,r31 ; Is this the mapping to remove?
6742 ld r8,mpAlias(r9) ; Get forward chain pointer
6743 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6744 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6745 std r8,0(r11) ; Unchain gpv->phys mapping
6746 b gadDelDone ; Finish deleting mapping
6747 gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6748 and r0,r0,r7 ; Get flags
6749 or r0,r0,r8 ; Insert new forward pointer
6750 stdcx. r0,0,r11 ; Slam it back in
6751 bne-- gadRem64Rt ; Lost reservation, retry
6752 b gadDelDone ; Finish deleting mapping
6756 gadRem64Nxt: la r11,mpAlias(r9) ; Point to (soon to be) previous link
6757 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6758 mr. r9,r8 ; Does next entry exist?
6759 b gadRem64Lp ; Carry on
6762 gadDelDone: mr r3,r26 ; Get physent address
6763 bl mapPhysUnlock ; Unlock physent chain
6766 gadFillMap: lwz r12,pmapSpace(r28) ; Get guest space id number
6767 li r2,0 ; Get a zero
6768 stw r24,mpFlags(r31) ; Set mapping's flags
6769 sth r12,mpSpace(r31) ; Set mapping's space id number
6770 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6771 stw r29,mpPAddr(r31) ; Set mapping's physical address
6772 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6773 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6774 b gadChain ; Continue with chaining mapping to physent
6775 gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6777 gadChain: mr r3,r29 ; r3 <- physical frame address
6778 bl mapFindLockPN ; Find 'n' lock this page's physent
6779 mr. r26,r3 ; Got lock on our physent?
6780 beq-- gadBadPLock ; No, time to bail out
6782 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6783 lwz r12,ppLink+4(r26) ; Get forward chain
6784 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6785 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6786 stw r11,mpAlias+4(r31) ; New mapping will head chain
6787 stw r12,ppLink+4(r26) ; Point physent to new mapping
6788 b gadFinish ; All over now...
6790 gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6791 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6792 ld r12,ppLink(r26) ; Get forward chain
6793 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6794 and r12,r12,r7 ; Isolate pointer's flags
6795 or r12,r12,r31 ; Insert new mapping's address forming pointer
6796 std r11,mpAlias(r31) ; New mapping will head chain
6797 std r12,ppLink(r26) ; Point physent to new mapping
6799 gadFinish: eieio ; Ensure new mapping is completely visible
6801 gadRelPhy: mr r3,r26 ; r3 <- physent addr
6802 bl mapPhysUnlock ; Unlock physent chain
6804 gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6805 bl sxlkUnlock ; Release host pmap search lock
6807 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6808 mtmsr r25 ; Restore 'rupts, translation
6809 isync ; Throw a small wrench into the pipeline
6810 b gadPopFrame ; Nothing to do now but pop a frame and return
6811 gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6813 gadPopFrame: lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6814 ; Get caller's return address
6815 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6816 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6817 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6818 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6819 mtlr r0 ; Prepare return address
6820 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6821 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6822 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6823 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6824 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6825 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6826 lwz r1,0(r1) ; Pop stack frame
6827 blr ; Return to caller
6829 gadBadPLock:
6830 gadPEMissMiss:
6831 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6832 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6833 li r3,failMapping ; The BOMB, Dmitri.
6834 sc ; The hydrogen bomb.
6838 ; Guest shadow assist -- suspend a guest mapping
6840 ; Suspends a guest mapping.
6843 ; r3 : address of host pmap, 32-bit kernel virtual address
6844 ; r4 : address of guest pmap, 32-bit kernel virtual address
6845 ; r5 : guest virtual address, high-order 32 bits
6846 ; r6 : guest virtual address, low-order 32 bits
6848 ; Non-volatile register usage:
6849 ; r26 : VMM extension block's physical address
6850 ; r27 : host pmap physical address
6851 ; r28 : guest pmap physical address
6852 ; r29 : caller's msr image from mapSetUp
6853 ; r30 : guest virtual address
6854 ; r31 : gva->phys mapping's physical address
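;
; In outline (a hedged C sketch; helper names are illustrative, not the kernel's):
;
;     void susp_map_gv(pmap_t *host_pmap, pmap_t *guest_pmap, uint64_t gva)
;     {
;         mapping_t *mp = find_gv_slot(host_pmap, guest_pmap, gva);  /* hash-group search below */
;         if (mp == NULL) return;                                    /* free/dormant slots don't match */
;         invalidate_pte(mp);                                        /* mapInvPte32/64 plus PCA free bit */
;         mp->flags |= MPG_DORMANT;                                  /* keep the slot, translation is off */
;     }
;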
6858 .globl EXT(hw_susp_map_gv)
6860 LEXT(hw_susp_map_gv)
6862 #define gsuStackSize ((31-26+1)*4)+4
6864 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6865 ; Mint a new stack frame
6866 mflr r0 ; Get caller's return address
6867 mfsprg r11,2 ; Get feature flags
6868 mtcrf 0x02,r11 ; Insert feature flags into cr6
6869 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6870 ; Save caller's return address
6871 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6872 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6873 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6874 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6875 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6876 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6878 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6880 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6881 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6882 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6884 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6885 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6886 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6887 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6888 srwi r11,r30,12 ; Form shadow hash:
6889 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6890 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6891 ; Form index offset from hash page number
6892 add r31,r31,r10 ; r31 <- hash page index entry
6893 lwz r31,4(r31) ; r31 <- hash page paddr
6894 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6895 ; r31 <- hash group paddr
6896 b gsuStart ; Get to it
6897 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6898 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6899 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6900 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6901 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6902 srwi r11,r30,12 ; Form shadow hash:
6903 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6904 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6905 ; Form index offset from hash page number
6906 add r31,r31,r10 ; r31 <- hash page index entry
6907 ld r31,0(r31) ; r31 <- hash page paddr
6908 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6909 ; r31 <- hash group paddr
6911 gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6912 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6913 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6914 mr r29,r11 ; Save caller's msr image
6916 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6917 bl sxlkExclusive ; Get lock exclusive
6919 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6920 mtctr r0 ; in this group
6921 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6923 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6924 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6925 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6926 b gsu32SrchLp ; Let the search begin!
6930 gsu32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6931 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6932 mr r7,r4 ; r7 <- current mapping slot's space ID
6933 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6934 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6935 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6936 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6937 xor r7,r7,r9 ; Compare space ID
6938 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6939 xor r8,r8,r30 ; Compare virtual address
6940 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6941 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6943 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6944 bdnz gsu32SrchLp ; Iterate
6946 mr r6,r3 ; r6 <- current mapping slot's flags
6947 clrrwi r5,r5,12 ; Remove flags from virtual address
6948 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6949 xor r4,r4,r9 ; Compare space ID
6950 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6951 xor r5,r5,r30 ; Compare virtual address
6952 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6953 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6954 b gsuSrchMiss ; No joy in our hash group
6957 gsu64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6958 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6959 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6960 b gsu64SrchLp ; Let the search begin!
6964 gsu64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6965 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6966 mr r7,r4 ; r7 <- current mapping slot's space ID
6967 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6968 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6969 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6970 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6971 xor r7,r7,r9 ; Compare space ID
6972 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6973 xor r8,r8,r30 ; Compare virtual address
6974 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6975 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6977 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6978 bdnz gsu64SrchLp ; Iterate
6980 mr r6,r3 ; r6 <- current mapping slot's flags
6981 clrrdi r5,r5,12 ; Remove flags from virtual address
6982 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6983 xor r4,r4,r9 ; Compare space ID
6984 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6985 xor r5,r5,r30 ; Compare virtual address
6986 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6987 bne gsuSrchMiss ; No joy in our hash group
6990 gsuSrchHit: bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6991 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6992 ; r31 <- mapping's physical address
6993 ; r3 -> PTE slot physical address
6994 ; r4 -> High-order 32 bits of PTE
6995 ; r5 -> Low-order 32 bits of PTE
6997 ; r7 -> PCA physical address
6998 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6999 b gsuFreePTE ; Join 64-bit path to release the PTE
7000 gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7001 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7002 gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
7003 beq- gsuNoPTE ; No valid PTE, we're almost done
7004 lis r0,0x8000 ; Prepare free bit for this slot
7005 srw r0,r0,r2 ; Position free bit
7006 or r6,r6,r0 ; Set it in our PCA image
7007 lwz r8,mpPte(r31) ; Get PTE pointer
7008 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7009 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7010 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7011 stw r6,0(r7) ; Update PCA and unlock the PTEG
7013 gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7014 ori r3,r3,mpgDormant ; Mark entry dormant
7015 stw r3,mpFlags(r31) ; Save updated flags
7016 eieio ; Ensure update is visible when we unlock
7019 gsuSrchMiss: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7020 bl sxlkUnlock ; Release host pmap search lock
7022 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7023 mtmsr r29 ; Restore 'rupts, translation
7024 isync ; Throw a small wrench into the pipeline
7025 b gsuPopFrame ; Nothing to do now but pop a frame and return
7026 gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7028 gsuPopFrame: lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7029 ; Get caller's return address
7030 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7031 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7032 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7033 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7034 mtlr r0 ; Prepare return address
7035 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7036 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7037 lwz r1,0(r1) ; Pop stack frame
7038 blr ; Return to caller
7041 ; Guest shadow assist -- test guest mapping reference and change bits
7043 ; Locates the specified guest mapping, and if it exists gathers its reference
7044 ; and change bits, optionally resetting them.
7047 ; r3 : address of host pmap, 32-bit kernel virtual address
7048 ; r4 : address of guest pmap, 32-bit kernel virtual address
7049 ; r5 : guest virtual address, high-order 32 bits
7050 ; r6 : guest virtual address, low-order 32 bits
7051 ; r7 : reset boolean
7053 ; Non-volatile register usage:
7054 ; r24 : VMM extension block's physical address
7055 ; r25 : return code (w/reference and change bits)
7056 ; r26 : reset boolean
7057 ; r27 : host pmap physical address
7058 ; r28 : guest pmap physical address
7059 ; r29 : caller's msr image from mapSetUp
7060 ; r30 : guest virtual address
7061 ; r31 : gva->phys mapping's physical address
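;
; In outline (a hedged C sketch; helper names are illustrative, not the kernel's):
;
;     unsigned test_rc_gv(mapping_t *mp, int reset)
;     {
;         unsigned rc = gather_rc(mp);              /* R and C merged by mapInvPte32/64 below  */
;         if (reset) {
;             clear_mapping_rc(mp);                 /* clear the mapping's copy of R and C     */
;             clear_pte_rc_and_revalidate(mp);      /* and any live PTE's copy                 */
;         }
;         return rc;                                /* mapRtNotFnd when no mapping was found   */
;     }
;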
7065 .globl EXT(hw_test_rc_gv)
7067 LEXT(hw_test_rc_gv)
7069 #define gtdStackSize ((31-24+1)*4)+4
7071 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7072 ; Mint a new stack frame
7073 mflr r0 ; Get caller's return address
7074 mfsprg r11,2 ; Get feature flags
7075 mtcrf 0x02,r11 ; Insert feature flags into cr6
7076 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7077 ; Save caller's return address
7078 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7079 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7080 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7081 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7082 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7083 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7084 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7085 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7087 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7089 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7090 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
7092 bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
7094 lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
7095 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
7096 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
7097 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7098 srwi r11,r30,12 ; Form shadow hash:
7099 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7100 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7101 ; Form index offset from hash page number
7102 add r31,r31,r10 ; r31 <- hash page index entry
7103 lwz r31,4(r31) ; r31 <- hash page paddr
7104 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7105 ; r31 <- hash group paddr
7106 b gtdStart ; Get to it
7108 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7109 ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7110 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
7111 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
7112 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7113 srwi r11,r30,12 ; Form shadow hash:
7114 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7115 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7116 ; Form index offset from hash page number
7117 add r31,r31,r10 ; r31 <- hash page index entry
7118 ld r31,0(r31) ; r31 <- hash page paddr
7119 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7120 ; r31 <- hash group paddr
7122 gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
7123 xor r28,r4,r28 ; Convert guest pmap_t virt->real
7124 mr r26,r7 ; Save reset boolean
7125 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7126 mr r29,r11 ; Save caller's msr image
7128 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7129 bl sxlkExclusive ; Get lock exclusive
7131 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7132 mtctr r0 ; in this group
7133 bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
7135 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7136 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7137 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7138 b gtd32SrchLp ; Let the search begin!
7142 gtd32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7143 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7144 mr r7,r4 ; r7 <- current mapping slot's space ID
7145 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7146 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7147 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7148 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7149 xor r7,r7,r9 ; Compare space ID
7150 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7151 xor r8,r8,r30 ; Compare virtual address
7152 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7153 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7155 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7156 bdnz gtd32SrchLp ; Iterate
7158 mr r6,r3 ; r6 <- current mapping slot's flags
7159 clrrwi r5,r5,12 ; Remove flags from virtual address
7160 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7161 xor r4,r4,r9 ; Compare space ID
7162 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7163 xor r5,r5,r30 ; Compare virtual address
7164 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7165 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7166 b gtdSrchMiss ; No joy in our hash group
7169 gtd64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7170 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7171 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7172 b gtd64SrchLp ; Let the search begin!
7176 gtd64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7177 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7178 mr r7,r4 ; r7 <- current mapping slot's space ID
7179 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7180 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7181 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7182 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7183 xor r7,r7,r9 ; Compare space ID
7184 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7185 xor r8,r8,r30 ; Compare virtual address
7186 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7187 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7189 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7190 bdnz gtd64SrchLp ; Iterate
7192 mr r6,r3 ; r6 <- current mapping slot's flags
7193 clrrdi r5,r5,12 ; Remove flags from virtual address
7194 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7195 xor r4,r4,r9 ; Compare space ID
7196 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7197 xor r5,r5,r30 ; Compare virtual address
7198 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7199 bne gtdSrchMiss ; No joy in our hash group
7202 gtdSrchHit: bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
7204 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
7206 cmplwi cr1,r26,0 ; Do we want to clear RC?
7207 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7208 mr. r3,r3 ; Was there a previously valid PTE?
7209 li r0,lo16(mpR|mpC) ; Get bits to clear
7211 and r25,r5,r0 ; Copy RC bits into result
7212 beq++ cr1,gtdNoClr32 ; Nope...
7214 andc r12,r12,r0 ; Clear mapping copy of RC
7215 andc r5,r5,r0 ; Clear PTE copy of RC
7216 sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
7218 gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
7220 sth r5,6(r3) ; Store updated RC in PTE
7221 eieio ; Make sure we do not reorder
7222 stw r4,0(r3) ; Revalidate the PTE
7224 eieio ; Make sure all updates come first
7225 stw r6,0(r7) ; Unlock PCA
7227 gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7228 bl sxlkUnlock ; Unlock the search list
7229 b gtdR32 ; Join common...
7234 gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
7236 cmplwi cr1,r26,0 ; Do we want to clear RC?
7237 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7238 mr. r3,r3 ; Was there a previously valid PTE?
7239 li r0,lo16(mpR|mpC) ; Get bits to clear
7241 and r25,r5,r0 ; Copy RC bits into result
7242 beq++ cr1,gtdNoClr64 ; Nope...
7244 andc r12,r12,r0 ; Clear mapping copy of RC
7245 andc r5,r5,r0 ; Clear PTE copy of RC
7246 sth r12,mpVAddr+6(r31) ; Set the new RC
7248 gtdNoClr64: beq-- gtdNoOld64 ; Nope, no previous PTE...
7250 sth r5,14(r3) ; Store updated RC
7251 eieio ; Make sure we do not reorder
7252 std r4,0(r3) ; Revalidate the PTE
7254 eieio ; Make sure all updates come first
7255 stw r6,0(r7) ; Unlock PCA
7257 gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7258 bl sxlkUnlock ; Unlock the search list
7259 b gtdR64 ; Join common...
7262 gtdSrchMiss: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7263 bl sxlkUnlock ; Unlock the search list
7264 li r25,mapRtNotFnd ; Get ready to return not found
7265 bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
7267 gtdR32: mtmsr r29 ; Restore caller's msr image
7268 isync ; Throw a small wrench into the pipeline
7269 b gtdEpilog ; Join common epilog code
7271 gtdR64: mtmsrd r29 ; Restore caller's msr image
7273 gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7274 ; Get caller's return address
7275 mr r3,r25 ; Get return code
7276 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7277 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7278 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7279 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7280 mtlr r0 ; Prepare return address
7281 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7282 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7283 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7284 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7285 lwz r1,0(r1) ; Pop stack frame
7286 blr ; Return to caller
7289 ; Guest shadow assist -- convert guest to host virtual address
7291 ; Locates the specified guest mapping, and if it exists locates the
7292 ; first mapping belonging to its host on the physical chain and returns
7293 ; its virtual address.
7295 ; Note that if there are multiple mappings belonging to this host
7296 ; chained to the physent to which the guest mapping is chained, then
7297 ; host virtual aliases exist for this physical address. If host aliases
7298 ; exist, then we select the first on the physent chain, making it
7299 ; unpredictable which of the two or more possible host virtual addresses
7300 ; will be returned.
7303 ; r3 : address of guest pmap, 32-bit kernel virtual address
7304 ; r4 : guest virtual address, high-order 32 bits
7305 ; r5 : guest virtual address, low-order 32 bits
7307 ; Non-volatile register usage:
7308 ; r24 : physent physical address
7309 ; r25 : VMM extension block's physical address
7310 ; r26 : host virtual address
7311 ; r27 : host pmap physical address
7312 ; r28 : guest pmap physical address
7313 ; r29 : caller's msr image from mapSetUp
7314 ; r30 : guest virtual address
7315 ; r31 : gva->phys mapping's physical address
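;
; In outline (a hedged C sketch; helper names are illustrative, not the kernel's):
;
;     uint64_t gva_to_hva(pmap_t *guest_pmap, uint64_t gva)
;     {
;         mapping_t *gmp = find_gv_slot(guest_pmap, gva);           /* hash-group search below   */
;         if (gmp == NULL) return ~0ull;                            /* no guest mapping: return -1 */
;         for (mapping_t *mp = physent_first(physent_of(gmp)); mp != NULL; mp = mp->alias)
;             if (mp->type == MP_NORMAL && mp->space == host_space(guest_pmap))
;                 return mp->vaddr & ~MP_HWFLAGS;                   /* first alias wins (see note above) */
;         return ~0ull;                                             /* host mapping gone: return -1 */
;     }
;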
7319 .globl EXT(hw_gva_to_hva)
7321 LEXT(hw_gva_to_hva)
7323 #define gthStackSize ((31-24+1)*4)+4
7325 stwu r1,-(FM_ALIGN(gthStackSize)+FM_SIZE)(r1)
7326 ; Mint a new stack frame
7327 mflr r0 ; Get caller's return address
7328 mfsprg r11,2 ; Get feature flags
7329 mtcrf 0x02,r11 ; Insert feature flags into cr6
7330 stw r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7331 ; Save caller's return address
7332 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7333 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7334 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7335 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7336 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7337 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7338 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7339 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7341 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7343 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7344 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7346 bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
7348 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7349 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7350 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7351 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7352 srwi r11,r30,12 ; Form shadow hash:
7353 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7354 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7355 ; Form index offset from hash page number
7356 add r31,r31,r10 ; r31 <- hash page index entry
7357 lwz r31,4(r31) ; r31 <- hash page paddr
7358 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7359 ; r31 <- hash group paddr
7360 b gthStart ; Get to it
7362 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7363 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7364 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7365 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7366 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7367 srwi r11,r30,12 ; Form shadow hash:
7368 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7369 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7370 ; Form index offset from hash page number
7371 add r31,r31,r10 ; r31 <- hash page index entry
7372 ld r31,0(r31) ; r31 <- hash page paddr
7373 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7374 ; r31 <- hash group paddr
7376 gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7377 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7378 mr r29,r11 ; Save caller's msr image
7380 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7381 bl sxlkExclusive ; Get lock exclusive
7383 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7384 mtctr r0 ; in this group
7385 bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
7387 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7388 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7389 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7390 b gth32SrchLp ; Let the search begin!
7394 gth32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7395 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7396 mr r7,r4 ; r7 <- current mapping slot's space ID
7397 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7398 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7399 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7400 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7401 xor r7,r7,r9 ; Compare space ID
7402 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7403 xor r8,r8,r30 ; Compare virtual address
7404 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7405 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7407 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7408 bdnz gth32SrchLp ; Iterate
7410 mr r6,r3 ; r6 <- current mapping slot's flags
7411 clrrwi r5,r5,12 ; Remove flags from virtual address
7412 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7413 xor r4,r4,r9 ; Compare space ID
7414 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7415 xor r5,r5,r30 ; Compare virtual address
7416 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7417 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7418 b gthSrchMiss ; No joy in our hash group
7421 gth64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7422 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7423 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7424 b gth64SrchLp ; Let the search begin!
7428 gth64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7429 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7430 mr r7,r4 ; r7 <- current mapping slot's space ID
7431 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7432 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7433 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7434 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7435 xor r7,r7,r9 ; Compare space ID
7436 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7437 xor r8,r8,r30 ; Compare virtual address
7438 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7439 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7441 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7442 bdnz gth64SrchLp ; Iterate
7444 mr r6,r3 ; r6 <- current mapping slot's flags
7445 clrrdi r5,r5,12 ; Remove flags from virtual address
7446 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7447 xor r4,r4,r9 ; Compare space ID
7448 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7449 xor r5,r5,r30 ; Compare virtual address
7450 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7451 bne gthSrchMiss ; No joy in our hash group
7453 gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
7454 bl mapFindLockPN ; Find 'n' lock this page's physent
7455 mr. r24,r3 ; Got lock on our physent?
7456 beq-- gthBadPLock ; No, time to bail out
7458 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7460 lwz r9,ppLink+4(r24) ; Get first mapping on physent
7461 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7462 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
7463 gthPELoop: mr. r12,r9 ; Got a mapping to look at?
7464 beq- gthPEMiss ; Nope, we've missed hva->phys mapping
7465 lwz r7,mpFlags(r12) ; Get mapping's flags
7466 lhz r4,mpSpace(r12) ; Get mapping's space id number
7467 lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
7468 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
7470 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7471 rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
7472 xori r0,r0,mpNormal ; Normal mapping?
7473 xor r4,r4,r6 ; Compare w/ host space id number
7474 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7475 beq gthPEHit ; Join common path on hit (r26 holds host virtual address)
7476 b gthPELoop ; Iterate
7478 gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
7479 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
7480 ld r9,ppLink(r24) ; Get first mapping on physent
7481 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7482 andc r9,r9,r0 ; Cleanup mapping pointer
7483 gthPELp64: mr. r12,r9 ; Got a mapping to look at?
7484 beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
7485 lwz r7,mpFlags(r12) ; Get mapping's flags
7486 lhz r4,mpSpace(r12) ; Get mapping's space id number
7487 ld r26,mpVAddr(r12) ; Get mapping's virtual address
7488 ld r9,mpAlias(r12) ; Next mapping physent alias chain
7489 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7490 rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
7491 xori r0,r0,mpNormal ; Normal mapping?
7492 xor r4,r4,r6 ; Compare w/ host space id number
7493 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7494 beq gthPEHit ; Join common path on hit (r26 holds host virtual address)
7495 b gthPELp64 ; Iterate
7498 gthPEMiss: mr r3,r24 ; Get physent's address
7499 bl mapPhysUnlock ; Unlock physent chain
7501 gthSrchMiss: la r3,pmapSXlk(r27) ; Get host pmap search lock address
7502 bl sxlkUnlock ; Release host pmap search lock
7503 li r3,-1 ; Return 64-bit -1
7505 bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
7506 b gthEpi32 ; Take 32-bit exit
7509 gthPEHit: mr r3,r24 ; Get physent's address
7510 bl mapPhysUnlock ; Unlock physent chain
7511 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7512 bl sxlkUnlock ; Release host pmap search lock
7514 bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
7516 gthR32: li r3,0 ; High-order 32 bits host virtual address
7517 mr r4,r26 ; Low-order 32 bits host virtual address
7518 gthEpi32: mtmsr r29 ; Restore caller's msr image
7519 isync ; Throw a small wrench into the pipeline
7520 b gthEpilog ; Join common epilog code
7523 gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
7524 clrldi r4,r26,32 ; Low-order 32 bits host virtual address
7525 gthEpi64: mtmsrd r29 ; Restore caller's msr image
7527 gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7528 ; Get caller's return address
7529 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7530 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7531 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7532 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7533 mtlr r0 ; Prepare return address
7534 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7535 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7536 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7537 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7538 lwz r1,0(r1) ; Pop stack frame
7539 blr ; Return to caller
7541 gthBadPLock:
7542 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
7543 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7544 li r3,failMapping ; The BOMB, Dmitri.
7545 sc ; The hydrogen bomb.
7549 ; Guest shadow assist -- find a guest mapping
7551 ; Locates the specified guest mapping, and if it exists returns a copy
7552 ; of it.
7555 ; r3 : address of guest pmap, 32-bit kernel virtual address
7556 ; r4 : guest virtual address, high-order 32 bits
7557 ; r5 : guest virtual address, low-order 32 bits
7558 ; r6 : 32 byte copy area, 32-bit kernel virtual address
7560 ; Non-volatile register usage:
7561 ; r25 : VMM extension block's physical address
7562 ; r26 : copy area virtual address
7563 ; r27 : host pmap physical address
7564 ; r28 : guest pmap physical address
7565 ; r29 : caller's msr image from mapSetUp
7566 ; r30 : guest virtual address
7567 ; r31 : gva->phys mapping's physical address
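;
; In outline (a hedged C sketch; helper names and the exact return codes shown here are
; assumptions for illustration, not the kernel's declarations):
;
;     int find_map_gv(pmap_t *guest_pmap, uint64_t gva, void *copy_area)
;     {
;         mapping_t *mp = find_gv_slot(guest_pmap, gva);   /* hash-group search below        */
;         if (mp == NULL) return mapRtNotFnd;
;         memcpy(copy_area, mp, 32);                       /* 32-byte copy area supplied in r6 */
;         return mapRtOK;
;     }
;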
7571 .globl EXT(hw_find_map_gv)
7573 LEXT(hw_find_map_gv)
7575 #define gfmStackSize ((31-25+1)*4)+4
7577 stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
7578 ; Mint a new stack frame
7579 mflr r0 ; Get caller's return address
7580 mfsprg r11,2 ; Get feature flags
7581 mtcrf 0x02,r11 ; Insert feature flags into cr6
7582 stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7583 ; Save caller's return address
7584 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7585 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7586 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7587 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7588 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7589 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7590 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7592 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7593 mr r26,r6 ; Copy copy buffer vaddr
7595 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7596 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7598 bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
7600 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7601 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7602 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7603 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7604 srwi r11,r30,12 ; Form shadow hash:
7605 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7606 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7607 ; Form index offset from hash page number
7608 add r31,r31,r10 ; r31 <- hash page index entry
7609 lwz r31,4(r31) ; r31 <- hash page paddr
7610 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7611 ; r31 <- hash group paddr
7612 b gfmStart ; Get to it
7614 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7615 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7616 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7617 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7618 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7619 srwi r11,r30,12 ; Form shadow hash:
7620 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7621 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7622 ; Form index offset from hash page number
7623 add r31,r31,r10 ; r31 <- hash page index entry
7624 ld r31,0(r31) ; r31 <- hash page paddr
7625 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7626 ; r31 <- hash group paddr
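;
;   Both salt paths above form the same shadow hash and hash-group address. A
;   minimal C sketch of that computation, with groups_per_page and group_size
;   standing in for the GV_HPAGE_* / GV_HGRP_* constants (assumed, not kernel values):
;
;       typedef unsigned long long paddr_t;
;
;       /* hash = spaceID ^ (guest vaddr >> 12) */
;       static unsigned int gv_shadow_hash(unsigned int space_id, unsigned long long gva)
;       {
;           return space_id ^ (unsigned int)(gva >> 12);
;       }
;
;       /* high-order hash bits select a hash page through the physical index,
;          low-order bits select a mapping group within that page              */
;       static paddr_t gv_hash_group(const paddr_t *hash_page_index, unsigned int hash,
;                                    unsigned int groups_per_page, unsigned int group_size)
;       {
;           paddr_t page = hash_page_index[hash / groups_per_page];
;           return page + (paddr_t)(hash % groups_per_page) * group_size;
;       }
;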
7628 gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7629 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7630 mr r29,r11 ; Save caller's msr image
7632 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7633 bl sxlkExclusive ; Get lock exclusive
7635 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7636 mtctr r0 ; in this group
7637 bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
7639 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7640 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7641 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7642 b gfm32SrchLp ; Let the search begin!
gfm32SrchLp:
7646 mr r6,r3 ; r6 <- current mapping slot's flags
7647 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7648 mr r7,r4 ; r7 <- current mapping slot's space ID
7649 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7650 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7651 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7652 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7653 xor r7,r7,r9 ; Compare space ID
7654 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7655 xor r8,r8,r30 ; Compare virtual address
7656 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7657 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7659 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7660 bdnz gfm32SrchLp ; Iterate
7662 mr r6,r3 ; r6 <- current mapping slot's flags
7663 clrrwi r5,r5,12 ; Remove flags from virtual address
7664 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7665 xor r4,r4,r9 ; Compare space ID
7666 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7667 xor r5,r5,r30 ; Compare virtual address
7668 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7669 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7670 b gfmSrchMiss ; No joy in our hash group
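;
;   Each iteration above folds the free/dormant test, the space ID compare, and
;   the virtual address compare into one zero/non-zero value so a single branch
;   decides the hit. The equivalent predicate in C (a sketch; free_dormant_mask
;   stands in for mpgFree+mpgDormant, and gva is assumed page aligned, as r30 is):
;
;       static int gv_slot_hit(unsigned int slot_flags, unsigned int free_dormant_mask,
;                              unsigned int slot_space, unsigned int space_id,
;                              unsigned long long slot_va, unsigned long long gva)
;       {
;           unsigned long long miss = (slot_flags & free_dormant_mask)   /* free or dormant  */
;                                   | (slot_space ^ space_id)            /* space mismatch   */
;                                   | ((slot_va & ~0xFFFULL) ^ gva);     /* page mismatch    */
;           return miss == 0;                                            /* hit iff all zero */
;       }
;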
gfm64Search:
7673 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7674 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7675 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7676 b gfm64SrchLp ; Let the search begin!
gfm64SrchLp:
7680 mr r6,r3 ; r6 <- current mapping slot's flags
7681 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7682 mr r7,r4 ; r7 <- current mapping slot's space ID
7683 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7684 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7685 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7686 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7687 xor r7,r7,r9 ; Compare space ID
7688 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7689 xor r8,r8,r30 ; Compare virtual address
7690 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7691 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7693 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7694 bdnz gfm64SrchLp ; Iterate
7696 mr r6,r3 ; r6 <- current mapping slot's flags
7697 clrrdi r5,r5,12 ; Remove flags from virtual address
7698 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7699 xor r4,r4,r9 ; Compare space ID
7700 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7701 xor r5,r5,r30 ; Compare virtual address
7702 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7703 bne gfmSrchMiss ; No joy in our hash group
7705 gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
lwz r6,4(r31) ; +4
lwz r7,8(r31) ; +8
7708 lwz r8,12(r31) ; +12
7709 lwz r9,16(r31) ; +16
7710 lwz r10,20(r31) ; +20
7711 lwz r11,24(r31) ; +24
7712 lwz r12,28(r31) ; +28
7714 li r31,mapRtOK ; Return found mapping
7716 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7717 bl sxlkUnlock ; Release host pmap search lock
7719 bt++ pf64Bitb,gfmEpi64 ; Test for 64-bit machine
7721 gfmEpi32: mtmsr r29 ; Restore caller's msr image
7722 isync ; A small wrench
7723 b gfmEpilog ; and a larger bubble
7726 gfmEpi64: mtmsrd r29 ; Restore caller's msr image
7728 gfmEpilog: mr. r3,r31 ; Copy/test mapping address
7729 beq gfmNotFound ; Skip copy if no mapping found
7731 stw r5,0(r26) ; Store 32 bytes of mapping into virtual
stw r6,4(r26) ; +4
stw r7,8(r26) ; +8
7734 stw r8,12(r26) ; +12
7735 stw r9,16(r26) ; +16
7736 stw r10,20(r26) ; +20
7737 stw r11,24(r26) ; +24
7738 stw r12,28(r26) ; +28
gfmNotFound:
7741 lwz r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7742 ; Get caller's return address
7743 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7744 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7745 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7746 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7747 mtlr r0 ; Prepare return address
7748 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7749 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7750 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7751 lwz r1,0(r1) ; Pop stack frame
7752 blr ; Return to caller
gfmSrchMiss:
7756 li r31,mapRtNotFnd ; Indicate mapping not found
7757 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7758 bl sxlkUnlock ; Release host pmap search lock
7759 bt++ pf64Bitb,gfmEpi64 ; Take 64-bit exit
7760 b gfmEpi32 ; Take 32-bit exit
7764 ; Guest shadow assist -- change guest page protection
7766 ; Locates the specified guest mapping and, if it exists, changes its protection.
7770 ; r3 : address of guest pmap, 32-bit kernel virtual address
7771 ; r4 : guest virtual address, high-order 32 bits
7772 ; r5 : guest virtual address, low-order 32 bits
7773 ; r6 : guest mapping protection code
7775 ; Non-volatile register usage:
7776 ; r25 : caller's msr image from mapSetUp
7777 ; r26 : guest mapping protection code
7778 ; r27 : host pmap physical address
7779 ; r28 : guest pmap physical address
7780 ; r29 : VMM extension block's physical address
7781 ; r30 : guest virtual address
7782 ; r31 : gva->phys mapping's physical address
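;
;   As with hw_find_map_gv, the C-level shape of this entry point is roughly the
;   following (an illustrative prototype, not the declaration from the kernel headers):
;
;       /* Returns mapRtOK if the guest mapping was found and its protection
;          changed, mapRtNotFnd if no such mapping exists. */
;       unsigned int hw_protect_gv(struct pmap *guest_pmap,    /* r3    */
;                                  unsigned long long gva,     /* r4:r5 */
;                                  unsigned int prot);         /* r6    */
;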
7785 .globl EXT(hw_protect_gv)
LEXT(hw_protect_gv)

7789 #define gcpStackSize ((31-24+1)*4)+4
7791 stwu r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
7792 ; Mint a new stack frame
7793 mflr r0 ; Get caller's return address
7794 mfsprg r11,2 ; Get feature flags
7795 mtcrf 0x02,r11 ; Insert feature flags into cr6
7796 stw r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7797 ; Save caller's return address
7798 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7799 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7800 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7801 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7802 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7803 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7804 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7806 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7807 mr r26,r6 ; Copy guest mapping protection code
7809 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7810 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7811 bt++ pf64Bitb,gcp64Salt ; Handle 64-bit machine separately
7812 lwz r29,pmapVmmExtPhys+4(r3) ; r29 <- VMM pmap extension block paddr
7813 lwz r27,vmxHostPmapPhys+4(r11) ; r27 <- host pmap paddr
7814 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7815 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7816 srwi r11,r30,12 ; Form shadow hash:
7817 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7818 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7819 ; Form index offset from hash page number
7820 add r31,r31,r10 ; r31 <- hash page index entry
7821 lwz r31,4(r31) ; r31 <- hash page paddr
7822 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7823 ; r31 <- hash group paddr
7824 b gcpStart ; Get to it
7826 gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7827 ld r29,pmapVmmExtPhys(r3) ; r29 <- VMM pmap extension block paddr
7828 ld r27,vmxHostPmapPhys(r11) ; r27 <- host pmap paddr
7829 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7830 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7831 srwi r11,r30,12 ; Form shadow hash:
7832 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7833 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7834 ; Form index offset from hash page number
7835 add r31,r31,r10 ; r31 <- hash page index entry
7836 ld r31,0(r31) ; r31 <- hash page paddr
7837 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7838 ; r31 <- hash group paddr
7840 gcpStart: xor r28,r4,r28 ; Convert guest pmap_t virt->real
7841 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7842 mr r25,r11 ; Save caller's msr image
7844 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7845 bl sxlkExclusive ; Get lock exclusive
7847 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7848 mtctr r0 ; in this group
7849 bt++ pf64Bitb,gcp64Search ; Test for 64-bit machine
7851 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7852 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7853 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7854 b gcp32SrchLp ; Let the search begin!
gcp32SrchLp:
7858 mr r6,r3 ; r6 <- current mapping slot's flags
7859 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7860 mr r7,r4 ; r7 <- current mapping slot's space ID
7861 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7862 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7863 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7864 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7865 xor r7,r7,r9 ; Compare space ID
7866 or r0,r11,r7 ; r0 <- free || dormant || !space match
7867 xor r8,r8,r30 ; Compare virtual address
7868 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7869 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7871 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7872 bdnz gcp32SrchLp ; Iterate
7874 mr r6,r3 ; r6 <- current mapping slot's flags
7875 clrrwi r5,r5,12 ; Remove flags from virtual address
7876 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7877 xor r4,r4,r9 ; Compare space ID
7878 or r0,r11,r4 ; r0 <- free || dormant || !space match
7879 xor r5,r5,r30 ; Compare virtual address
7880 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7881 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7882 b gcpSrchMiss ; No joy in our hash group
gcp64Search:
7885 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7886 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7887 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7888 b gcp64SrchLp ; Let the search begin!
gcp64SrchLp:
7892 mr r6,r3 ; r6 <- current mapping slot's flags
7893 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7894 mr r7,r4 ; r7 <- current mapping slot's space ID
7895 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7896 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7897 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7898 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7899 xor r7,r7,r9 ; Compare space ID
7900 or r0,r11,r7 ; r0 <- free || dormant || !space match
7901 xor r8,r8,r30 ; Compare virtual address
7902 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7903 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7905 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7906 bdnz gcp64SrchLp ; Iterate
7908 mr r6,r3 ; r6 <- current mapping slot's flags
7909 clrrdi r5,r5,12 ; Remove flags from virtual address
7910 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7911 xor r4,r4,r9 ; Compare space ID
7912 or r0,r11,r4 ; r0 <- free || dormant || !space match
7913 xor r5,r5,r30 ; Compare virtual address
7914 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7915 bne gcpSrchMiss ; No joy in our hash group
gcpSrchHit:
7918 bt++ pf64Bitb,gcpDscon64 ; Handle 64-bit disconnect separately
7919 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
7920 ; r31 <- mapping's physical address
7921 ; r3 -> PTE slot physical address
7922 ; r4 -> High-order 32 bits of PTE
7923 ; r5 -> Low-order 32 bits of PTE
7925 ; r7 -> PCA physical address
7926 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7927 b gcpFreePTE ; Join 64-bit path to release the PTE
7928 gcpDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7929 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7930 gcpFreePTE: mr. r3,r3 ; Was there a valid PTE?
7931 beq- gcpSetKey ; No valid PTE, we're almost done
7932 lis r0,0x8000 ; Prepare free bit for this slot
7933 srw r0,r0,r2 ; Position free bit
7934 or r6,r6,r0 ; Set it in our PCA image
7935 lwz r8,mpPte(r31) ; Get PTE pointer
7936 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7937 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7938 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7939 stw r6,0(r7) ; Update PCA and unlock the PTEG
7941 gcpSetKey: lwz r0,mpVAddr+4(r31) ; Get va word containing protection bits
7942 rlwimi r0,r26,0,mpPP ; Insert new protection bits
7943 stw r0,mpVAddr+4(r31) ; Write 'em back
7944 eieio ; Ensure previous mapping updates are visible
7945 li r31,mapRtOK ; I'm a success
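;
;   The gcpSetKey step above is a plain read-modify-write of the low word of
;   mpVAddr: only the page-protection field changes and every other flag bit is
;   preserved. A sketch, with pp_mask standing in for the mpPP field mask:
;
;       static void set_guest_protection(volatile unsigned int *vaddr_flags_word,
;                                        unsigned int new_prot, unsigned int pp_mask)
;       {
;           unsigned int w = *vaddr_flags_word;            /* flags + low va bits    */
;           w = (w & ~pp_mask) | (new_prot & pp_mask);     /* splice in new PP bits  */
;           *vaddr_flags_word = w;                         /* eieio orders the store */
;       }
;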
7947 gcpRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7948 bl sxlkUnlock ; Release host pmap search lock
7950 mr r3,r31 ; r3 <- result code
7951 bt++ pf64Bitb,gcpRtn64 ; Handle 64-bit separately
7952 mtmsr r25 ; Restore 'rupts, translation
7953 isync ; Throw a small wrench into the pipeline
7954 b gcpPopFrame ; Nothing to do now but pop a frame and return
7955 gcpRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
gcpPopFrame:
7957 lwz r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7958 ; Get caller's return address
7959 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7960 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7961 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7962 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7963 mtlr r0 ; Prepare return address
7964 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7965 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7966 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7967 lwz r1,0(r1) ; Pop stack frame
7968 blr ; Return to caller
gcpSrchMiss:
7972 li r31,mapRtNotFnd ; Could not locate requested mapping
7973 b gcpRelPmap ; Exit through host pmap search lock release
7977 ; Find the physent based on a physical page and try to lock it (but not too hard)
7978 ; Note that this table always has an entry with a 0 table pointer at the end
7980 ; R3 contains ppnum on entry
7981 ; R3 is 0 if no entry was found
7982 ; R3 is physent if found
7983 ; cr0_eq is true if lock was obtained or there was no entry to lock
7984 ; cr0_eq is false if there was an entry and it was locked
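;
;   The scan below is a linear walk of pmap_mem_regions followed by indexing into
;   the matching bank's physent table. A C sketch with assumed field names that
;   mirror mrPhysTab/mrStart/mrEnd; each physent is 8 bytes, matching the *8 index
;   scaling in the code:
;
;       struct region_sk  { struct physent_sk *phys_tab; unsigned int start, end; };
;       struct physent_sk { unsigned int word0, word1; };       /* 8 bytes per page */
;
;       static struct physent_sk *find_physent(struct region_sk *tab, unsigned int ppnum)
;       {
;           for ( ; tab->phys_tab != 0; tab++)         /* 0 table pointer ends the table */
;               if (ppnum >= tab->start && ppnum <= tab->end)
;                   return &tab->phys_tab[ppnum - tab->start];
;           return 0;                                  /* no physent for this page       */
;       }
;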
7990 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7991 mr r2,r3 ; Save our target
7992 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7994 mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
7995 lwz r5,mrStart(r9) ; Get start of table entry
7996 lwz r0,mrEnd(r9) ; Get end of table entry
7997 addi r9,r9,mrSize ; Point to the next slot
7998 cmplwi cr2,r3,0 ; Are we at the end of the table?
7999 cmplw r2,r5 ; See if we are in this table
8000 cmplw cr1,r2,r0 ; Check end also
8001 sub r4,r2,r5 ; Calculate index to physical entry
8002 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
8003 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
8004 slwi r4,r4,3 ; Get offset to physical entry
8006 blt-- mapFindPhz ; Did not find it...
8008 add r3,r3,r4 ; Point right to the slot
8010 mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
8011 rlwinm. r0,r2,0,0,0 ; Is it locked?
8012 bnelr-- ; Yes it is...
8014 lwarx r2,0,r3 ; Get the lock
8015 rlwinm. r0,r2,0,0,0 ; Is it locked?
8016 oris r0,r2,0x8000 ; Set the lock bit
8017 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
8018 stwcx. r0,0,r3 ; Try to stuff it back...
8019 bne-- mapFindOv ; Collision, try again...
8020 isync ; Clear any speculations
8023 mapFindKl: li r2,lgKillResv ; Killing field
8024 stwcx. r2,0,r2 ; Trash reservation...
8025 crclr cr0_eq ; Make sure we do not think we got the lock
8028 mapFindNo: crset cr0_eq ; Make sure that we set this
8029 li r3,0 ; Show that we did not find it
8032 ; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
8034 ; How the pmap cache lookup works:
8036 ; We use a combination of three things: a mask of valid entries, a sub-tag, and the
8037 ; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
8038 ; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
8039 ; of the ESID, bits 32:35 of the effective address for 64-bit and 0:3 for 32-bit. The cache
8040 ; entry contains the full 36 bit ESID.
8042 ; The purpose of the sub-tag is to limit the number of searches necessary when looking
8043 ; for an existing cache entry. Because there are 16 slots in the cache, we could end up
8044 ; searching all 16 if a match is not found.
8046 ; Essentially, we will search only the slots that have a valid entry and whose sub-tag
8047 ; matches. More than likely, we will eliminate almost all of the searches.
; R3 = pmap address
8051 ; R4 = ESID high half
8052 ; R5 = ESID low half
8055 ; R3 = pmap cache slot if found, 0 if not
8056 ; R10 = pmapCCtl address
8057 ; R11 = pmapCCtl image
8058 ; pmapCCtl locked on exit
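;
;   The word-wide bit twiddling that follows computes, for all 16 slots at once,
;   which ones are worth a full ESID compare. The straight-line equivalent is the
;   per-slot filter sketched below (names and the unpacked layouts are illustrative;
;   the real sub-tags are packed 4 bits per slot into two words):
;
;       static unsigned short subtag_candidates(const unsigned char subtag[16],
;                                               unsigned short valid_mask,
;                                               unsigned int esid_low4)
;       {
;           unsigned short hits = 0;
;           for (int i = 0; i < 16; i++)
;               if ((valid_mask & (1u << i)) && (subtag[i] & 0xF) == esid_low4)
;                   hits |= (unsigned short)(1u << i);
;           return hits;     /* only these slots need their full ESID compared */
;       }
;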
8064 la r10,pmapCCtl(r3) ; Point to the segment cache control
8067 lwarx r11,0,r10 ; Get the segment cache control value
8068 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8069 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
8070 bne-- pmapCacheLookur ; Nope...
8071 stwcx. r0,0,r10 ; Try to take the lock
8072 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
8074 isync ; Make sure we get reservation first
8075 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
8076 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
8077 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
8078 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
8079 lis r8,0x8888 ; Get some eights
8080 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
8081 ori r8,r8,0x8888 ; Fill the rest with eights
8083 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
8084 eqv r9,r9,r5 ; Get 0xF where we hit in top half
8086 rlwinm r2,r10,1,0,30 ; Shift over 1
8087 rlwinm r0,r9,1,0,30 ; Shift over 1
8088 and r2,r2,r10 ; AND the even/odd pair into the even
8089 and r0,r0,r9 ; AND the even/odd pair into the even
8090 rlwinm r10,r2,2,0,28 ; Shift over 2
8091 rlwinm r9,r0,2,0,28 ; Shift over 2
8092 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8093 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8095 and r10,r10,r8 ; Clear out extras
8096 and r9,r9,r8 ; Clear out extras
8098 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
8099 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
8100 or r10,r0,r10 ; Merge them
8101 or r9,r2,r9 ; Merge them
8102 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
8103 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
8104 or r10,r0,r10 ; Merge them
8105 or r9,r2,r9 ; Merge them
8106 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
8107 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
8108 not r6,r11 ; Turn invalid into valid
8109 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
8111 la r10,pmapSegCache(r3) ; Point at the cache slots
8112 and. r6,r9,r6 ; Get mask of valid and hit
8114 li r3,0 ; Assume not found
8115 oris r0,r0,0x8000 ; Start a mask
8116 beqlr++ ; Leave, should usually be no hits...
8118 pclNextEnt: cntlzw r5,r6 ; Find an in use one
8119 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
8120 rlwinm r7,r5,4,0,27 ; Index to the cache entry
8121 srw r2,r0,r5 ; Get validity mask bit
8122 add r7,r7,r10 ; Point to the cache slot
8123 andc r6,r6,r2 ; Clear the validity bit we just tried
8124 bgelr-- cr1 ; Leave if there are no more to check...
8126 lwz r5,sgcESID(r7) ; Get the top half
8128 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
8130 bne++ pclNextEnt ; Nope, try again...
8132 mr r3,r7 ; Point to the slot
8138 li r11,lgKillResv ; The killing spot
8139 stwcx. r11,0,r11 ; Kill the reservation
8142 lwz r11,pmapCCtl(r3) ; Get the segment cache control
8143 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8144 beq++ pmapCacheLookup ; Nope...
8145 b pmapCacheLookus ; Yup, keep waiting...
8149 ; mapMergeRC -- Given a physical mapping address in R31, locate its
8150 ; connected PTE (if any) and merge the PTE referenced and changed bits
8151 ; into the mapping and physent.
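;
;   The merge itself is small: the PTE's reference/changed field is written into
;   the mapping's flag word (safe with a plain store because the PTEG is locked)
;   and ORed into the physent's master RC word with an atomic loop. A C11 sketch
;   of that effect; the field layout and the repositioning of the bits into the
;   physent's format are assumed/omitted here:
;
;       #include <stdatomic.h>
;
;       static void merge_rc(unsigned int *mapping_flags,        /* mpVAddr low word */
;                            _Atomic unsigned int *physent_rc,   /* master RC word   */
;                            unsigned int pte_rc, unsigned int rc_mask)
;       {
;           *mapping_flags = (*mapping_flags & ~rc_mask) | (pte_rc & rc_mask);
;           atomic_fetch_or(physent_rc, pte_rc & rc_mask);   /* the lwarx/stwcx. loop */
;       }
;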
8157 lwz r0,mpPte(r31) ; Grab the PTE offset
8158 mfsdr1 r7 ; Get the pointer to the hash table
8159 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8160 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8161 andi. r3,r0,mpHValid ; Is there a possible PTE?
8162 srwi r7,r0,4 ; Convert to PCA units
8163 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8164 mflr r2 ; Save the return
8165 subfic r7,r7,-4 ; Convert to -4 based negative index
8166 add r7,r10,r7 ; Point to the PCA directly
8167 beqlr-- ; There was no PTE to start with...
8169 bl mapLockPteg ; Lock the PTEG
8171 lwz r0,mpPte(r31) ; Grab the PTE offset
8172 mtlr r2 ; Restore the LR
8173 andi. r3,r0,mpHValid ; Is there a possible PTE?
8174 beq- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8176 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8177 add r3,r3,r10 ; Point to actual PTE
8178 lwz r5,4(r3) ; Get the real part of the PTE
8179 srwi r10,r5,12 ; Change physical address to a ppnum
8181 mMNmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8182 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8183 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8184 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8185 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8186 add r11,r11,r8 ; Point to the bank table
8187 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8188 lwz r11,mrStart(r11) ; Get the start of bank
8189 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8190 addi r2,r2,4 ; Offset to last half of field
8191 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8192 sub r11,r10,r11 ; Get the index into the table
8193 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8195 mMmrgRC: lwarx r10,r11,r2 ; Get the master RC
8196 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8197 or r0,r0,r10 ; Merge in the new RC
8198 stwcx. r0,r11,r2 ; Try to stick it back
8199 bne-- mMmrgRC ; Try again if we collided...
8200 eieio ; Commit all updates
8203 stw r6,0(r7) ; Unlock PTEG
8207 ; 64-bit version of mapMergeRC
8212 lwz r0,mpPte(r31) ; Grab the PTE offset
8213 ld r5,mpVAddr(r31) ; Grab the virtual address
8214 mfsdr1 r7 ; Get the pointer to the hash table
8215 rldicr r10,r7,0,45 ; Clean up the hash table base
8216 andi. r3,r0,mpHValid ; Is there a possible PTE?
8217 srdi r7,r0,5 ; Convert to PCA units
8218 rldicr r7,r7,0,61 ; Clean up PCA
8219 subfic r7,r7,-4 ; Convert to -4 based negative index
8220 mflr r2 ; Save the return
8221 add r7,r10,r7 ; Point to the PCA directly
8222 beqlr-- ; There was no PTE to start with...
8224 bl mapLockPteg ; Lock the PTEG
8226 lwz r0,mpPte(r31) ; Grab the PTE offset again
8227 mtlr r2 ; Restore the LR
8228 andi. r3,r0,mpHValid ; Is there a possible PTE?
8229 beq-- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8231 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8232 add r3,r3,r10 ; Point to the actual PTE
8233 ld r5,8(r3) ; Get the real part
8234 srdi r10,r5,12 ; Change physical address to a ppnum
8235 b mMNmerge ; Join the common 32-64-bit code...
8239 ; This routine, given a mapping, will find and lock the PTEG
8240 ; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
8241 ; PTEG and return. In this case R4 is undefined and only the low 12 bits of
8242 ; mpVAddr are valid in R5. R3 will contain 0.
8244 ; If the mapping is still valid, we will invalidate the PTE and merge
8245 ; the RC bits into the physent and also save them into the mapping.
8247 ; We then return with R3 pointing to the PTE slot, R4 is the
8248 ; top of the PTE and R5 is the bottom. R6 contains the PCA.
8249 ; R7 points to the PCA entry.
8251 ; Note that we should NEVER be called on a block or special mapping.
8252 ; We could do many bad things.
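;
;   This routine and mapMergeRC share the same check / lock / re-check discipline
;   around mpPte: the first test avoids taking the PTEG lock when there is no PTE,
;   and the second catches a PTE that was stolen while we waited for the lock.
;   Sketched in C with hypothetical pteg_lock/pteg_unlock helpers standing in for
;   mapLockPteg and the PCA store:
;
;       extern void pteg_lock(void), pteg_unlock(void);
;
;       static int with_pte_locked(volatile unsigned int *mp_pte, unsigned int hvalid_bit)
;       {
;           if (!(*mp_pte & hvalid_bit))          /* no PTE: skip the lock entirely     */
;               return 0;
;           pteg_lock();                          /* heavyweight: lock the PTEG         */
;           if (!(*mp_pte & hvalid_bit)) {        /* stolen while we waited?            */
;               pteg_unlock();
;               return 0;
;           }
;           return 1;                             /* PTE still present, PTEG still held */
;       }
;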
8258 lwz r0,mpPte(r31) ; Grab the PTE offset
8259 mfsdr1 r7 ; Get the pointer to the hash table
8260 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8261 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8262 andi. r3,r0,mpHValid ; Is there a possible PTE?
8263 srwi r7,r0,4 ; Convert to PCA units
8264 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8265 mflr r2 ; Save the return
8266 subfic r7,r7,-4 ; Convert to -4 based negative index
8267 add r7,r10,r7 ; Point to the PCA directly
8268 beqlr-- ; There was no PTE to start with...
8270 bl mapLockPteg ; Lock the PTEG
8272 lwz r0,mpPte(r31) ; Grab the PTE offset
8273 mtlr r2 ; Restore the LR
8274 andi. r3,r0,mpHValid ; Is there a possible PTE?
8275 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8277 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8278 add r3,r3,r10 ; Point to actual PTE
8279 lwz r4,0(r3) ; Get the top of the PTE
8281 li r8,tlbieLock ; Get the TLBIE lock
8282 rlwinm r0,r4,0,1,31 ; Clear the valid bit
8283 stw r0,0(r3) ; Invalidate the PTE
8285 sync ; Make sure everyone sees the invalidate
8287 mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
8288 mfsprg r2,2 ; Get feature flags
8289 mr. r0,r0 ; Is it locked?
8290 li r0,1 ; Get our lock word
8291 bne- mITLBIE32 ; It is locked, go wait...
8293 stwcx. r0,0,r8 ; Try to get it
8294 bne- mITLBIE32 ; We was beat...
8296 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
8297 li r0,0 ; Lock clear value
8299 tlbie r5 ; Invalidate it everywhere
8301 beq- mINoTS32 ; Can not have MP on this machine...
8303 eieio ; Make sure that the tlbie happens first
8304 tlbsync ; Wait for everyone to catch up
8305 sync ; Make sure of it all
8307 mINoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
8308 lwz r5,4(r3) ; Get the real part
8309 srwi r10,r5,12 ; Change physical address to a ppnum
8311 mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8312 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8313 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8314 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8315 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8316 add r11,r11,r8 ; Point to the bank table
8317 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8318 lwz r11,mrStart(r11) ; Get the start of bank
8319 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8320 addi r2,r2,4 ; Offset to last half of field
8321 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8322 sub r11,r10,r11 ; Get the index into the table
8323 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8326 mImrgRC: lwarx r10,r11,r2 ; Get the master RC
8327 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8328 or r0,r0,r10 ; Merge in the new RC
8329 stwcx. r0,r11,r2 ; Try to stick it back
8330 bne-- mImrgRC ; Try again if we collided...
8332 blr ; Leave with the PCA still locked up...
8334 mIPUnlock: eieio ; Make sure all updates come first
8336 stw r6,0(r7) ; Unlock
8345 lwz r0,mpPte(r31) ; Grab the PTE offset
8346 ld r5,mpVAddr(r31) ; Grab the virtual address
8347 mfsdr1 r7 ; Get the pointer to the hash table
8348 rldicr r10,r7,0,45 ; Clean up the hash table base
8349 andi. r3,r0,mpHValid ; Is there a possible PTE?
8350 srdi r7,r0,5 ; Convert to PCA units
8351 rldicr r7,r7,0,61 ; Clean up PCA
8352 subfic r7,r7,-4 ; Convert to -4 based negative index
8353 mflr r2 ; Save the return
8354 add r7,r10,r7 ; Point to the PCA directly
8355 beqlr-- ; There was no PTE to start with...
8357 bl mapLockPteg ; Lock the PTEG
8359 lwz r0,mpPte(r31) ; Grab the PTE offset again
8360 mtlr r2 ; Restore the LR
8361 andi. r3,r0,mpHValid ; Is there a possible PTE?
8362 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8364 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8365 add r3,r3,r10 ; Point to the actual PTE
8366 ld r4,0(r3) ; Get the top of the PTE
8368 li r8,tlbieLock ; Get the TLBIE lock
8369 rldicr r0,r4,0,62 ; Clear the valid bit
8370 std r0,0(r3) ; Invalidate the PTE
8372 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
8373 sync ; Make sure everyone sees the invalidate
8374 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
8376 mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
8377 mr. r0,r0 ; Is it locked?
8378 li r0,1 ; Get our lock word
8379 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
8381 stwcx. r0,0,r8 ; Try to get it
8382 bne-- mITLBIE64 ; We was beat...
8384 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
8386 li r0,0 ; Lock clear value
8388 tlbie r2 ; Invalidate it everywhere
8390 eieio ; Make sure that the tlbie happens first
8391 tlbsync ; Wait for everyone to catch up
8392 ptesync ; Wait for quiet again
8394 stw r0,tlbieLock(0) ; Clear the tlbie lock
8396 ld r5,8(r3) ; Get the real part
8397 srdi r10,r5,12 ; Change physical address to a ppnum
8398 b mINmerge ; Join the common 32-64-bit code...
8400 mITLBIE64a: li r5,lgKillResv ; Killing field
8401 stwcx. r5,0,r5 ; Kill reservation
8403 mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
8404 mr. r0,r0 ; Is it locked?
8405 beq++ mITLBIE64 ; Nope, try again...
8406 b mITLBIE64b ; Yup, wait for it...
8409 ; mapLockPteg - Locks a PTEG
8410 ; R7 points to PCA entry
8411 ; R6 contains PCA on return
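;
;   This is a test-and-set spinlock built on lwarx/stwcx.: try to set the lock bit
;   under a reservation, and on contention spin on plain loads (after killing the
;   reservation) until the bit clears. A portable C11 sketch of the same shape
;   (the PCA image returned in R6 corresponds to the return value here):
;
;       #include <stdatomic.h>
;
;       static unsigned int pca_lock(_Atomic unsigned int *pca, unsigned int lock_bit)
;       {
;           for (;;) {
;               unsigned int old = atomic_fetch_or(pca, lock_bit);  /* try to set it    */
;               if (!(old & lock_bit))
;                   return old | lock_bit;       /* we own the PTEG; hand back the PCA  */
;               while (atomic_load(pca) & lock_bit)
;                   ;                            /* spin on plain loads, not on atomics */
;           }
;       }
;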
8418 lwarx r6,0,r7 ; Pick up the PCA
8419 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8420 ori r0,r6,PCAlock ; Set the lock bit
8421 bne-- mLSkill ; It is locked...
8423 stwcx. r0,0,r7 ; Try to lock the PTEG
8424 bne-- mapLockPteg ; We collided...
8426 isync ; Nostradamus lied
8429 mLSkill: li r6,lgKillResv ; Get killing field
8430 stwcx. r6,0,r6 ; Kill it
8433 lwz r6,0(r7) ; Pick up the PCA
8434 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8435 beq++ mapLockPteg ; Nope, try again...
8436 b mapLockPteh ; Yes, wait for it...
8440 ; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
8441 ; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
8442 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
8443 ; R4 returns the slot index.
8445 ; CR7 also indicates that we have a block mapping
8447 ; The PTEG allocation controls are a bit map of the state of the PTEG.
8448 ; PCAfree indicates that the PTE slot is empty.
8449 ; PCAauto means that it comes from an autogen area. These
8450 ; guys do not keep track of reference and change and are actually "wired".
8451 ; They are easy to maintain. PCAsteal
8452 ; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
8453 ; fields fit in a single word and are loaded and stored under control of the
8454 ; PTEG control area lock (PCAlock).
8456 ; Note that PCAauto does not contribute to the steal calculations at all. Originally
8457 ; it did; autogens were second in priority. This can result in a pathological
8458 ; case where an instruction cannot make forward progress, or one PTE slot thrashes.
8461 ; Note that the PCA must be locked when we get here.
8463 ; Physically, the fields are arranged:
8470 ; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
; R3 = 0 - no steal, an unoccupied slot was selected
8475 ; R3 = 1 - steal regular
8476 ; R3 = 2 - steal autogen
8477 ; R4 contains slot number
8478 ; R6 contains updated PCA image
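;
;   In C terms the selection is: prefer the lowest-numbered free slot, otherwise
;   steal the slot picked by the sliding PCAsteal mask and report whether the victim
;   was a normal or an autogen PTE. The sketch below is simplified -- the real code
;   folds both masks into one word, finds the slot with cntlzw, updates the autogen
;   bit, and rotates the steal mask in the PCA image:
;
;       /* returns 0 = took a free slot, 1 = stole a normal PTE, 2 = stole an autogen */
;       static int select_slot(unsigned char pca_free, unsigned char pca_auto,
;                              unsigned char steal_mask, unsigned int *slot)
;       {
;           for (unsigned int i = 0; i < 8; i++)          /* 8 PTE slots per PTEG */
;               if (pca_free & (0x80u >> i)) { *slot = i; return 0; }
;           for (unsigned int i = 0; i < 8; i++)          /* nothing free: steal  */
;               if (steal_mask & (0x80u >> i)) {
;                   *slot = i;
;                   return (pca_auto & (0x80u >> i)) ? 2 : 1;
;               }
;           return 1;                                     /* steal mask always has a bit set */
;       }
;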
8483 mapSelSlot: lis r10,0 ; Clear autogen mask
8484 li r9,0 ; Start a mask
8485 beq cr7,mSSnotblk ; Skip if this is not a block mapping
8486 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
8488 mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
8489 oris r9,r9,0x8000 ; Get a mask
8490 cntlzw r4,r6 ; Find a slot or steal one
8491 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
8492 rlwinm r4,r4,0,29,31 ; Isolate bit position
8493 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
8494 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
8495 srwi r11,r11,1 ; Slide steal mask right
8496 and r8,r6,r2 ; Isolate the old in use and autogen bits
8497 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
8498 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
8499 and r2,r2,r10 ; Keep the autogen part if autogen
8500 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
8501 or r6,r6,r2 ; Add in the new autogen bit
8502 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
8503 rlwinm r8,r8,1,31,31 ; Isolate old in use
8504 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
8506 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
8510 ; Shared/Exclusive locks
8512 ; A shared/exclusive lock allows multiple shares of a lock to be taken
8513 ; but only one exclusive. A shared lock can be "promoted" to exclusive
8514 ; when it is the only share. If there are multiple sharers, the lock
8515 ; must be "converted". A promotion drops the share and gains exclusive as
8516 ; an atomic operation. If anyone else has a share, the operation fails.
8517 ; A conversion first drops the share and then takes an exclusive lock.
8519 ; We will want to add a timeout to this eventually.
8521 ; R3 is set to 0 for success, non-zero for failure
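;
;   The lock word is just a share count with the exclusive flag in the sign bit:
;   0 means free, 0x80000000 means held exclusive, and n means n shared holders.
;   A C11 sketch of the two acquire operations (the assembly additionally spins on
;   plain loads between attempts, which is omitted here):
;
;       #include <stdatomic.h>
;
;       #define SXLK_EXCL 0x80000000u
;
;       static int sxlk_try_exclusive(_Atomic unsigned int *lk)
;       {
;           unsigned int expect = 0;                  /* must be completely free */
;           return atomic_compare_exchange_strong(lk, &expect, SXLK_EXCL);
;       }
;
;       static int sxlk_try_shared(_Atomic unsigned int *lk)
;       {
;           unsigned int v = atomic_load(lk);
;           return !(v & SXLK_EXCL) &&                /* not held exclusive      */
;                  atomic_compare_exchange_strong(lk, &v, v + 1);  /* add our share */
;       }
;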
8525 ; Convert a share into an exclusive
8532 lis r0,0x8000 ; Get the locked lock image
8534 mflr r0 ; (TEST/DEBUG)
8535 oris r0,r0,0x8000 ; (TEST/DEBUG)
8538 sxlkCTry: lwarx r2,0,r3 ; Get the lock word
8539 cmplwi r2,1 ; Does it just have our share?
8540 subi r2,r2,1 ; Drop our share in case we do not get it
8541 bne-- sxlkCnotfree ; No, we need to unlock...
8542 stwcx. r0,0,r3 ; Try to take it exclusively
8543 bne-- sxlkCTry ; Collision, try again...
8550 stwcx. r2,0,r3 ; Try to drop our share...
8551 bne-- sxlkCTry ; Try again if we collided...
8552 b sxlkExclusive ; Go take it exclusively...
8555 ; Promote shared to exclusive
8561 lis r0,0x8000 ; Get the locked lock image
8563 mflr r0 ; (TEST/DEBUG)
8564 oris r0,r0,0x8000 ; (TEST/DEBUG)
8567 sxlkPTry: lwarx r2,0,r3 ; Get the lock word
8568 cmplwi r2,1 ; Does it just have our share?
8569 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
8570 stwcx. r0,0,r3 ; Try to take it exclusively
8571 bne-- sxlkPTry ; Collision, try again...
8577 sxlkPkill: li r2,lgKillResv ; Point to killing field
8578 stwcx. r2,0,r2 ; Kill reservation
8584 ; Take lock exclusively
8590 lis r0,0x8000 ; Get the locked lock image
8592 mflr r0 ; (TEST/DEBUG)
8593 oris r0,r0,0x8000 ; (TEST/DEBUG)
8596 sxlkXTry: lwarx r2,0,r3 ; Get the lock word
8597 mr. r2,r2 ; Is it locked?
8598 bne-- sxlkXWait ; Yes...
8599 stwcx. r0,0,r3 ; Try to take it
8600 bne-- sxlkXTry ; Collision, try again...
8602 isync ; Toss anything younger than us
8608 sxlkXWait: li r2,lgKillResv ; Point to killing field
8609 stwcx. r2,0,r2 ; Kill reservation
8611 sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
8612 mr. r2,r2 ; Is it free yet?
8613 beq++ sxlkXTry ; Yup...
8614 b sxlkXWaiu ; Hang around a bit more...
8617 ; Take a share of the lock
8622 sxlkShared: lwarx r2,0,r3 ; Get the lock word
8623 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8624 addi r2,r2,1 ; Up the share count
8625 bne-- sxlkSWait ; Yes...
8626 stwcx. r2,0,r3 ; Try to take it
8627 bne-- sxlkShared ; Collision, try again...
8629 isync ; Toss anything younger than us
8635 sxlkSWait: li r2,lgKillResv ; Point to killing field
8636 stwcx. r2,0,r2 ; Kill reservation
8638 sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
8639 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8640 beq++ sxlkShared ; Nope...
8641 b sxlkSWaiu ; Hang around a bit more...
8644 ; Unlock either exclusive or shared.
8649 sxlkUnlock: eieio ; Make sure we order our stores out
8651 sxlkUnTry: lwarx r2,0,r3 ; Get the lock
8652 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
8653 subi r2,r2,1 ; Remove our share if we have one
8654 li r0,0 ; Clear this
8655 bne-- sxlkUExclu ; We hold exclusive...
8657 stwcx. r2,0,r3 ; Try to lose our share
8658 bne-- sxlkUnTry ; Collision...
8661 sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
8662 beqlr++ ; Leave if ok...
8663 b sxlkUnTry ; Could not store, try over...
8667 .globl EXT(fillPage)
8671 mfsprg r0,2 ; Get feature flags
8672 mtcrf 0x02,r0 ; move pf64Bit to cr
8674 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
8675 lis r2,0x0200 ; Get vec
8677 ori r2,r2,0x2000 ; Get FP
8681 andc r5,r5,r2 ; Clear out permanent turn-offs
8683 ori r2,r2,0x8030 ; Clear IR, DR and EE
8685 andc r0,r5,r2 ; Kill them
8688 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
8690 slwi r3,r3,12 ; Make into a physical address
8691 mtmsr r2 ; Interrupts and translation off
8694 li r2,4096/32 ; Get number of cache lines
8696 fp32again: dcbz 0,r3 ; Clear
8697 addic. r2,r2,-1 ; Count down
8701 stw r8,12(r3) ; Fill
8702 stw r9,16(r3) ; Fill
8703 stw r10,20(r3) ; Fill
8704 stw r11,24(r3) ; Fill
8705 stw r12,28(r3) ; Fill
8706 addi r3,r3,32 ; Point next
8707 bgt+ fp32again ; Keep going
8709 mtmsr r5 ; Restore all
8716 sldi r2,r2,63 ; Get 64-bit bit
8717 or r0,r0,r2 ; Turn on 64-bit
8718 sldi r3,r3,12 ; Make into a physical address
8720 mtmsrd r0 ; Interrupts and translation off
8723 li r2,4096/128 ; Get number of cache lines
8725 fp64again: dcbz128 0,r3 ; Clear
8726 addic. r2,r2,-1 ; Count down
8729 std r7,16(r3) ; Fill
8730 std r8,24(r3) ; Fill
8731 std r9,32(r3) ; Fill
8732 std r10,40(r3) ; Fill
8733 std r11,48(r3) ; Fill
8734 std r12,56(r3) ; Fill
8735 std r4,64+0(r3) ; Fill
8736 std r6,64+8(r3) ; Fill
8737 std r7,64+16(r3) ; Fill
8738 std r8,64+24(r3) ; Fill
8739 std r9,64+32(r3) ; Fill
8740 std r10,64+40(r3) ; Fill
8741 std r11,64+48(r3) ; Fill
8742 std r12,64+56(r3) ; Fill
8743 addi r3,r3,128 ; Point next
8744 bgt+ fp64again ; Keep going
8746 mtmsrd r5 ; Restore all
8756 lis r11,hi16(EXT(mapdebug))
8757 ori r11,r11,lo16(EXT(mapdebug))
8762 mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
8777 .globl EXT(checkBogus)
8782 blr ; No-op normally