2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 #include <db_machine_commands.h>
33 #include <mach_debug.h>
35 #include <ppc/proc_reg.h>
36 #include <ppc/exception.h>
37 #include <ppc/Performance.h>
39 #include <mach/ppc/vm_param.h>
46 ; +--------+--------+--------+--------+--------+--------+--------+--------+
47 ; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
48 ; +--------+--------+--------+--------+--------+--------+--------+--------+
52 ; +--------+--------+--------+
53 ; |//////BB|BBBBBBBB|BBBB////| - SID - base
54 ; +--------+--------+--------+
58 ; +--------+--------+--------+
59 ; |////////|11111111|111111//| - SID - copy 1
60 ; +--------+--------+--------+
64 ; +--------+--------+--------+
65 ; |////////|//222222|22222222| - SID - copy 2
66 ; +--------+--------+--------+
70 ; +--------+--------+--------+
71 ; |//////33|33333333|33//////| - SID - copy 3 - not needed
72 ; +--------+--------+--------+ for 65 bit VPN
76 ; +--------+--------+--------+--------+--------+--------+--------+
77 ; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
78 ; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed
81 ; +--------+--------+--------+--------+--------+--------+--------+
82 ; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
83 ; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment"
90 ; +--------+--------+--------+--------+--------+--------+--------+
91 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
92 ; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA
94 ; 0 0 1 2 3 4 4 5 6 7 7
95 ; 0 8 6 4 2 0 8 6 4 2 9
96 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
97 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
98 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
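/*
 *    A rough C sketch of the hashing scheme the diagrams above describe: the
 *    space ID (SID) is replicated and ORed together, XORed with the left-shifted
 *    segment number to form the VSID, and the VSID XORed with the page index is
 *    wrapped into the hash table to select a PTEG.  The shift amounts, field
 *    widths, and table mask below are illustrative stand-ins; the real values
 *    are fixed by the diagrams above and the assembly that follows.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t vsid_for(uint64_t space_id, uint64_t ea)
 *    {
 *        uint64_t seg  = ea >> 28;                                       // 256MB segment number (EA[13:35])
 *        uint64_t sidh = space_id | (space_id << 14) | (space_id << 28); // SID copies ORed together
 *        return (sidh ^ seg) & 0x000FFFFFFFFFFFFFULL;                    // VSID = SID hash XOR shifted EA
 *    }
 *
 *    static uint64_t pteg_index(uint64_t vsid, uint64_t ea, uint64_t table_mask)
 *    {
 *        uint64_t page = (ea >> 12) & 0xFFFF;                            // page index within the segment
 *        return (vsid ^ page) & table_mask;                              // wrap into the hash table
 *    }
 */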
102 /* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
104 * Maps a page or block into a pmap
106 * Returns 0 if add worked or the vaddr of the first overlap if not
108 * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates (flow sketched in C below)
110 * 1) bump mapping busy count
112 * 3) find mapping full path - finds all possible list previous elements
113 * 4) upgrade pmap to exclusive
114 * 5) add mapping to search list
120 * 11) drop mapping busy count
123 * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
125 * 1) bump mapping busy count
127 * 3) find mapping full path - finds all possible list previous elements
128 * 4) upgrade pmap to exclusive
129 * 5) add mapping to search list
131 * 7) drop mapping busy count
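/*
 *    The non-block flow listed above, rendered as a C sketch.  The helper names
 *    (sxlk_shared, sxlk_promote, sxlk_convert, map_search_full, map_insert) are
 *    stand-ins for the sxlkShared/sxlkPromote/sxlkConvert/mapSearchFull/mapInsert
 *    routines used below; the physent chaining and busy-count handling are
 *    elided.  This is a sketch of the lock/rescan discipline, not the shipping
 *    implementation.
 *
 *    #include <stdint.h>
 *    typedef uint64_t addr64_t;
 *
 *    extern void     sxlk_shared(void *lk), sxlk_unlock(void *lk);
 *    extern int      sxlk_promote(void *lk), sxlk_convert(void *lk);        // 0 == success
 *    extern addr64_t map_search_full(void *pmap, addr64_t va, addr64_t len); // 0 == no overlap
 *    extern void     map_insert(void *pmap, void *mp);
 *    #define MAP_RT_BADLK ((addr64_t)~0ULL)          // stand-in for the mapRtBadLk code
 *
 *    addr64_t hw_add_map_sketch(void *pmap, void *lk, void *mp, addr64_t va, addr64_t len)
 *    {
 *        int have_exclusive = 0;
 *        sxlk_shared(lk);                                     // lock pmap share
 *        for (;;) {
 *            addr64_t hit = map_search_full(pmap, va, len);   // find mapping full path
 *            if (hit) { sxlk_unlock(lk); return hit; }        // overlap: return colliding vaddr
 *            if (have_exclusive || sxlk_promote(lk) == 0)     // upgrade pmap to exclusive
 *                break;
 *            if (sxlk_convert(lk) != 0) return MAP_RT_BADLK;  // timed out getting the lock
 *            have_exclusive = 1;                              // lock was dropped: rescan the list
 *        }
 *        map_insert(pmap, mp);                                // add mapping to search list
 *        sxlk_unlock(lk);                                     // unlock pmap
 *        return 0;                                            // success
 *    }
 */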
136 .globl EXT(hw_add_map)
140 stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
141 mflr r0 ; Save the link register
142 stw r17,FM_ARG0+0x00(r1) ; Save a register
143 stw r18,FM_ARG0+0x04(r1) ; Save a register
144 stw r19,FM_ARG0+0x08(r1) ; Save a register
145 mfsprg r19,2 ; Get feature flags
146 stw r20,FM_ARG0+0x0C(r1) ; Save a register
147 stw r21,FM_ARG0+0x10(r1) ; Save a register
148 mtcrf 0x02,r19 ; move pf64Bit cr6
149 stw r22,FM_ARG0+0x14(r1) ; Save a register
150 stw r23,FM_ARG0+0x18(r1) ; Save a register
151 stw r24,FM_ARG0+0x1C(r1) ; Save a register
152 stw r25,FM_ARG0+0x20(r1) ; Save a register
153 stw r26,FM_ARG0+0x24(r1) ; Save a register
154 stw r27,FM_ARG0+0x28(r1) ; Save a register
155 stw r28,FM_ARG0+0x2C(r1) ; Save a register
156 stw r29,FM_ARG0+0x30(r1) ; Save a register
157 stw r30,FM_ARG0+0x34(r1) ; Save a register
158 stw r31,FM_ARG0+0x38(r1) ; Save a register
159 stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
162 lwz r11,pmapFlags(r3) ; Get pmaps flags
163 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
164 bne hamPanic ; Call not valid for guest shadow assist pmap
167 rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
168 mr r28,r3 ; Save the pmap
169 mr r31,r4 ; Save the mapping
170 bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
171 lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
172 lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping
176 hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
177 ld r21,mbvrswap(r11) ; Get conversion mask for mapping
179 hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
181 mr r17,r11 ; Save the MSR
182 xor r28,r28,r20 ; Convert the pmap to physical addressing
183 xor r31,r31,r21 ; Convert the mapping to physical addressing
185 la r3,pmapSXlk(r28) ; Point to the pmap search lock
186 bl sxlkShared ; Go get a shared lock on the mapping lists
187 mr. r3,r3 ; Did we get the lock?
188 lwz r24,mpFlags(r31) ; Pick up the flags
189 bne-- hamBadLock ; Nope...
191 li r21,0 ; Remember that we have the shared lock
194 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
195 ; here so that we will know the previous elements so we can dequeue them
199 hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
200 lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
201 mr r3,r28 ; Pass in pmap to search
202 lhz r23,mpBSize(r31) ; Get the block size for later
203 mr r29,r4 ; Save top half of vaddr for later
204 mr r30,r5 ; Save bottom half of vaddr for later
206 bl EXT(mapSearchFull) ; Go see if we can find it
208 li r22,lo16(0x800C) ; Get 0xFFFF800C
209 rlwinm r0,r24,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
210 addi r23,r23,1 ; Get actual length
211 rlwnm r22,r22,r0,27,31 ; Rotate to get 12 or 25
212 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
213 slw r9,r23,r22 ; Isolate the low part
214 rlwnm r22,r23,r22,22,31 ; Extract the high order
215 addic r23,r9,-4096 ; Get the length to the last page
216 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
217 addme r22,r22 ; Do high order as well...
218 mr. r3,r3 ; Did we find a mapping here?
219 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
220 bne-- hamOverlay ; We found a mapping, this is no good, can not double map...
222 addc r9,r0,r23 ; Add size to get last page in new range
223 or. r0,r4,r5 ; Are we beyond the end?
224 adde r8,r29,r22 ; Add the rest of the length on
225 rlwinm r9,r9,0,0,31 ; Clean top half of sum
226 beq++ hamFits ; We are at the end...
228 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
229 cmplw r8,r4 ; Is our end before the next (top part)
230 crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
231 cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less
233 bf-- cr0_eq,hamOverlay ; No, we do not fit, there is an overlay...
236 ; Here we try to promote to an exclusive lock. This will fail if someone else also holds the lock.
239 hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
240 la r3,pmapSXlk(r28) ; Point to the pmap search lock
242 bne-- hamGotX ; We already have the exclusive...
244 bl sxlkPromote ; Try to promote shared to exclusive
245 mr. r3,r3 ; Could we?
246 beq++ hamGotX ; Yeah...
249 ; Since we could not promote our lock, we need to convert to it.
250 ; That means that we drop the shared lock and wait to get it
251 ; exclusive. Since we release the lock, we need to do the look up again.
255 la r3,pmapSXlk(r28) ; Point to the pmap search lock
256 bl sxlkConvert ; Convert shared to exclusive
257 mr. r3,r3 ; Could we?
258 bne-- hamBadLock ; Nope, we must have timed out...
260 li r21,1 ; Remember that we have the exclusive lock
261 b hamRescan ; Go look again...
265 hamGotX: mr r3,r28 ; Get the pmap to insert into
266 mr r4,r31 ; Point to the mapping
267 bl EXT(mapInsert) ; Insert the mapping into the list
269 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
270 lhz r8,mpSpace(r31) ; Get the address space
271 lwz r11,lgpPcfg(r11) ; Get the page config
272 mfsdr1 r7 ; Get the hash table base/bounds
274 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
275 lwz r12,pmapResidentMax(r28) ; r12 = pmap->stats.resident_max
276 addi r4,r4,1 ; Bump up the mapped page count
277 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
278 cmplw r12,r4 ; if pmap->stats.resident_max >= pmap->stats.resident_count
279 bge+ hamSkipMax ; goto hamSkipMax
280 stw r4,pmapResidentMax(r28) ; pmap->stats.resident_max = pmap->stats.resident_count
282 hamSkipMax: andi. r0,r24,mpType ; Is this a normal mapping?
284 rlwimi r8,r8,14,4,17 ; Double address space
285 rlwinm r9,r30,0,4,31 ; Clear segment
286 rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
287 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
288 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
289 rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
290 srw r9,r9,r11 ; Isolate just the page index
291 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
293 xor r9,r9,r10 ; Get the hash to the PTEG
295 bne-- hamDoneNP ; Not a normal mapping, therefore, no physent...
297 bl mapPhysFindLock ; Go find and lock the physent
299 bt++ pf64Bitb,ham64 ; This is 64-bit...
301 lwz r11,ppLink+4(r3) ; Get the alias chain pointer
302 rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
303 slwi r9,r9,6 ; Make PTEG offset
304 ori r7,r7,0xFFC0 ; Stick in the bottom part
305 rlwinm r12,r11,0,~ppFlags ; Clean it up
306 and r9,r9,r7 ; Wrap offset into table
307 mr r4,r31 ; Set the link to install
308 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
309 stw r12,mpAlias+4(r31) ; Move to the mapping
310 bl mapPhyCSet32 ; Install the link
311 b hamDone ; Go finish up...
315 ham64: li r0,ppLFAmask ; Get mask to clean up alias pointer
316 subfic r7,r7,46 ; Get number of leading zeros
317 eqv r4,r4,r4 ; Get all ones
318 ld r11,ppLink(r3) ; Get the alias chain pointer
319 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
320 srd r4,r4,r7 ; Get the wrap mask
321 sldi r9,r9,7 ; Change hash to PTEG offset
322 andc r11,r11,r0 ; Clean out the lock and flags
323 and r9,r9,r4 ; Wrap to PTEG
325 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
326 std r11,mpAlias(r31) ; Set the alias pointer in the mapping
328 bl mapPhyCSet64 ; Install the link
330 hamDone: bl mapPhysUnlock ; Unlock the physent chain
332 hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
333 bl sxlkUnlock ; Unlock the search list
335 mr r3,r31 ; Get the mapping pointer
336 bl mapDropBusy ; Drop the busy count
338 li r3,0 ; Set successful return
339 li r4,0 ; Set successful return
341 hamReturn: bt++ pf64Bitb,hamR64 ; Yes...
343 mtmsr r17 ; Restore enables/translation/etc.
345 b hamReturnC ; Join common...
347 hamR64: mtmsrd r17 ; Restore enables/translation/etc.
350 hamReturnC: lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
351 lwz r17,FM_ARG0+0x00(r1) ; Restore a register
352 lwz r18,FM_ARG0+0x04(r1) ; Restore a register
353 lwz r19,FM_ARG0+0x08(r1) ; Restore a register
354 lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
355 mtlr r0 ; Restore the return
356 lwz r21,FM_ARG0+0x10(r1) ; Restore a register
357 lwz r22,FM_ARG0+0x14(r1) ; Restore a register
358 lwz r23,FM_ARG0+0x18(r1) ; Restore a register
359 lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
360 lwz r25,FM_ARG0+0x20(r1) ; Restore a register
361 lwz r26,FM_ARG0+0x24(r1) ; Restore a register
362 lwz r27,FM_ARG0+0x28(r1) ; Restore a register
363 lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
364 lwz r29,FM_ARG0+0x30(r1) ; Restore a register
365 lwz r30,FM_ARG0+0x34(r1) ; Restore a register
366 lwz r31,FM_ARG0+0x38(r1) ; Restore a register
367 lwz r1,0(r1) ; Pop the stack
374 hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
375 li r0,mpC|mpR ; Get a mask to turn off RC bits
376 lwz r23,mpFlags(r31) ; Get the requested flags
377 lwz r20,mpVAddr(r3) ; Get the overlay address
378 lwz r8,mpVAddr(r31) ; Get the requested address
379 lwz r21,mpVAddr+4(r3) ; Get the overlay address
380 lwz r9,mpVAddr+4(r31) ; Get the requested address
381 lhz r10,mpBSize(r3) ; Get the overlay length
382 lhz r11,mpBSize(r31) ; Get the requested length
383 lwz r24,mpPAddr(r3) ; Get the overlay physical address
384 lwz r25,mpPAddr(r31) ; Get the requested physical address
385 andc r21,r21,r0 ; Clear RC bits
386 andc r9,r9,r0 ; Clear RC bits
388 la r3,pmapSXlk(r28) ; Point to the pmap search lock
389 bl sxlkUnlock ; Unlock the search list
391 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
392 mr r3,r20 ; Save the top of the colliding address
393 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
395 bne++ hamRemv ; Removing, go say so, so we help...
397 cmplw r20,r8 ; High part of vaddr the same?
398 cmplw cr1,r21,r9 ; Low part?
399 crand cr5_eq,cr0_eq,cr1_eq ; Remember if same
401 cmplw r10,r11 ; Size the same?
402 cmplw cr1,r24,r25 ; Physical address?
403 crand cr5_eq,cr5_eq,cr0_eq ; Remember
404 crand cr5_eq,cr5_eq,cr1_eq ; Remember if same
406 xor r23,r23,r22 ; Compare mapping flag words
407 andi. r23,r23,mpType|mpPerm ; Are mapping types and attributes the same?
408 crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
409 bf-- cr5_eq,hamSmash ; This is not the same, so we return a smash...
411 ori r4,r4,mapRtMapDup ; Set duplicate
412 b hamReturn ; And leave...
414 hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
415 b hamReturn ; Come back yall...
417 hamSmash: ori r4,r4,mapRtSmash ; Tell caller that it has some clean up to do
418 b hamReturn ; Join common epilog code
422 hamBadLock: li r3,0 ; Set lock time out error code
423 li r4,mapRtBadLk ; Set lock time out error code
424 b hamReturn ; Leave....
426 hamPanic: lis r0,hi16(Choke) ; System abend
427 ori r0,r0,lo16(Choke) ; System abend
428 li r3,failMapping ; Show that we failed some kind of mapping thing
435 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
437 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
438 * a 64-bit quantity, it is a long long so it is in R4 and R5.
440 * We return the virtual address of the removed mapping.
443 * Note that this is designed to be called from 32-bit mode with a stack.
445 * We disable translation and all interruptions here. This keeps us
446 * from having to worry about a deadlock due to having anything locked
447 * and needing it to process a fault.
449 * Note that this must be done with both interruptions off and VM off
451 * Remove mapping via pmap, regular page, no pte
454 * 2) find mapping full path - finds all possible list previous elements
455 * 3) upgrade pmap to exclusive
456 * 4) bump mapping busy count
457 * 5) remove mapping from search list
460 * 8) remove from physent
462 * 10) drop mapping busy count
463 * 11) drain mapping busy count
466 * Remove mapping via pmap, regular page, with pte (sketched in C below)
469 * 2) find mapping full path - finds all possible list previous elements
470 * 3) upgrade lock to exclusive
471 * 4) bump mapping busy count
473 * 6) invalidate pte and tlbie
474 * 7) atomic merge rc into physent
476 * 9) remove mapping from search list
479 * 12) remove from physent
481 * 14) drop mapping busy count
482 * 15) drain mapping busy count
485 * Remove mapping via pmap, I/O or block
488 * 2) find mapping full path - finds all possible list previous elements
489 * 3) upgrade lock to exclusive
490 * 4) bump mapping busy count
491 * 5) mark remove-in-progress
492 * 6) check and bump remove chunk cursor if needed
494 * 8) if something to invalidate, go to step 11
497 * 10) return with mapRtRemove to force higher level to call again
500 * 12) invalidate ptes, no tlbie
502 * 14) repeat 11 - 13 for all pages in chunk
503 * 15) if not final chunk, go to step 9
504 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
505 * 17) lock pmap share
506 * 18) find mapping full path - finds all possible list previous elements
507 * 19) upgrade lock to exclusive
508 * 20) remove mapping from search list
509 * 21) drop mapping busy count
510 * 22) drain mapping busy count
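/*
 *    A C sketch of the "regular page, with pte" removal flow listed above.  All
 *    helper names are stand-ins for the assembly routines that follow
 *    (mapSearchFull, the PTE invalidation and tlbie sequence, mapRemove, the
 *    physent chain edit, and the busy count primitives); error paths and the
 *    block/I/O chunking are omitted.
 *
 *    #include <stdint.h>
 *    #include <stddef.h>
 *    typedef uint64_t addr64_t;
 *
 *    extern void  sxlk_shared(void *lk), sxlk_unlock(void *lk);
 *    extern int   get_exclusive(void *lk);                     // promote, or convert + rescan
 *    extern void *map_search_full_next(void *pmap, addr64_t va, addr64_t *next);
 *    extern void  bump_busy(void *mp), drop_busy(void *mp), drain_busy(void *mp);
 *    extern void  invalidate_pte_and_tlbie(void *mp);          // PTE kill + TLB shootdown
 *    extern void  merge_rc_into_physent(void *mp);             // atomic RC merge
 *    extern void  map_remove(void *pmap, void *mp);
 *    extern void  unchain_from_physent(void *mp);
 *
 *    void *hw_rem_map_sketch(void *pmap, void *lk, addr64_t va, addr64_t *next)
 *    {
 *        addr64_t next_va = 0;
 *        sxlk_shared(lk);                                      // 1) lock pmap share
 *        void *mp = map_search_full_next(pmap, va, &next_va);  // 2) find mapping full path
 *        if (mp == NULL || !get_exclusive(lk)) {               //    not found, or 3) upgrade failed
 *            sxlk_unlock(lk);
 *            return NULL;                                      //    caller sees mapRtNotFnd/mapRtBadLk
 *        }
 *        bump_busy(mp);                                        // 4) bump mapping busy count
 *        invalidate_pte_and_tlbie(mp);                         // 6) invalidate pte and tlbie
 *        merge_rc_into_physent(mp);                            // 7) atomic merge rc into physent
 *        map_remove(pmap, mp);                                 // 9) remove mapping from search list
 *        sxlk_unlock(lk);                                      //    unlock pmap
 *        unchain_from_physent(mp);                             // 12) remove from physent
 *        drop_busy(mp);                                        // 14) drop mapping busy count
 *        drain_busy(mp);                                       // 15) drain mapping busy count
 *        if (next) *next = next_va;                            //    pass back next mapped vaddr
 *        return mp;                                            //    removed mapping
 *    }
 */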
515 .globl EXT(hw_rem_map)
520 ; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
521 ; THE HW_PURGE_* ROUTINES ALSO
524 #define hrmStackSize ((31-15+1)*4)+4
525 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
526 mflr r0 ; Save the link register
527 stw r15,FM_ARG0+0x00(r1) ; Save a register
528 stw r16,FM_ARG0+0x04(r1) ; Save a register
529 stw r17,FM_ARG0+0x08(r1) ; Save a register
530 stw r18,FM_ARG0+0x0C(r1) ; Save a register
531 stw r19,FM_ARG0+0x10(r1) ; Save a register
532 mfsprg r19,2 ; Get feature flags
533 stw r20,FM_ARG0+0x14(r1) ; Save a register
534 stw r21,FM_ARG0+0x18(r1) ; Save a register
535 mtcrf 0x02,r19 ; move pf64Bit cr6
536 stw r22,FM_ARG0+0x1C(r1) ; Save a register
537 stw r23,FM_ARG0+0x20(r1) ; Save a register
538 stw r24,FM_ARG0+0x24(r1) ; Save a register
539 stw r25,FM_ARG0+0x28(r1) ; Save a register
540 stw r26,FM_ARG0+0x2C(r1) ; Save a register
541 stw r27,FM_ARG0+0x30(r1) ; Save a register
542 stw r28,FM_ARG0+0x34(r1) ; Save a register
543 stw r29,FM_ARG0+0x38(r1) ; Save a register
544 stw r30,FM_ARG0+0x3C(r1) ; Save a register
545 stw r31,FM_ARG0+0x40(r1) ; Save a register
546 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
547 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
550 lwz r11,pmapFlags(r3) ; Get pmaps flags
551 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
552 bne hrmPanic ; Call not valid for guest shadow assist pmap
555 bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
556 lwz r9,pmapvr+4(r3) ; Get conversion mask
559 hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask
562 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
564 xor r28,r3,r9 ; Convert the pmap to physical addressing
567 ; Here is where we join in from the hw_purge_* routines
570 hrmJoin: lwz r3,pmapFlags(r28) ; Get pmap's flags
571 mfsprg r19,2 ; Get feature flags again (for alternate entries)
573 mr r17,r11 ; Save the MSR
574 mr r29,r4 ; Top half of vaddr
575 mr r30,r5 ; Bottom half of vaddr
577 rlwinm. r3,r3,0,pmapVMgsaa ; Is guest shadow assist active?
578 bne-- hrmGuest ; Yes, handle specially
580 la r3,pmapSXlk(r28) ; Point to the pmap search lock
581 bl sxlkShared ; Go get a shared lock on the mapping lists
582 mr. r3,r3 ; Did we get the lock?
583 bne-- hrmBadLock ; Nope...
586 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
587 ; here so that we will know the previous elements so we can dequeue them
588 ; later. Note: we get back mpFlags in R7.
591 mr r3,r28 ; Pass in pmap to search
592 mr r4,r29 ; High order of address
593 mr r5,r30 ; Low order of address
594 bl EXT(mapSearchFull) ; Go see if we can find it
596 andi. r0,r7,mpPerm ; Mapping marked permanent?
597 crmove cr5_eq,cr0_eq ; Remember permanent marking
598 mr r20,r7 ; Remember mpFlags
599 mr. r31,r3 ; Did we? (And remember mapping address for later)
600 mr r15,r4 ; Save top of next vaddr
601 mr r16,r5 ; Save bottom of next vaddr
602 beq-- hrmNotFound ; Nope, not found...
604 bf-- cr5_eq,hrmPerm ; This one can't be removed...
606 ; Here we try to promote to an exclusive lock. This will fail if someone else also holds the lock.
610 la r3,pmapSXlk(r28) ; Point to the pmap search lock
611 bl sxlkPromote ; Try to promote shared to exclusive
612 mr. r3,r3 ; Could we?
613 beq++ hrmGotX ; Yeah...
616 ; Since we could not promote our lock, we need to convert to it.
617 ; That means that we drop the shared lock and wait to get it
618 ; exclusive. Since we release the lock, we need to do the look up again.
622 la r3,pmapSXlk(r28) ; Point to the pmap search lock
623 bl sxlkConvert ; Convert shared to exclusive
624 mr. r3,r3 ; Could we?
625 bne-- hrmBadLock ; Nope, we must have timed out...
627 mr r3,r28 ; Pass in pmap to search
628 mr r4,r29 ; High order of address
629 mr r5,r30 ; Low order of address
630 bl EXT(mapSearchFull) ; Rescan the list
632 andi. r0,r7,mpPerm ; Mapping marked permanent?
633 crmove cr5_eq,cr0_eq ; Remember permanent marking
634 mr. r31,r3 ; Did we lose it when we converted?
635 mr r20,r7 ; Remember mpFlags
636 mr r15,r4 ; Save top of next vaddr
637 mr r16,r5 ; Save bottom of next vaddr
638 beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...
640 bf-- cr5_eq,hrmPerm ; This one can't be removed...
643 ; We have an exclusive lock on the mapping chain. And we
644 ; also have the busy count bumped in the mapping so it can not vanish out from under us.
648 hrmGotX: mr r3,r31 ; Get the mapping
649 bl mapBumpBusy ; Bump up the busy count
652 ; Invalidate any PTEs associated with this
653 ; mapping (more than one if a block) and accumulate the reference and change bits.
656 ; Here is also where we need to split 32- and 64-bit processing
659 lwz r21,mpPte(r31) ; Grab the offset to the PTE
660 rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
661 mfsdr1 r29 ; Get the hash table base and size
663 rlwinm r0,r20,0,mpType ; Isolate mapping type
664 cmplwi cr5,r0,mpBlock ; Remember whether this is a block mapping
665 cmplwi r0,mpMinSpecial ; cr0_lt <- not a special mapping type
667 rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
668 ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
669 cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
670 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
671 crorc cr0_eq,cr1_eq,cr0_lt ; No need to look at PTE if none or a special mapping
672 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
673 andc r29,r29,r2 ; Clean up hash table base
674 li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
675 mr r30,r23 ; Move the now merged vaddr to the correct register
676 add r26,r29,r21 ; Point to the PTEG slot
678 bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...
680 rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
681 beq- cr5,hrmBlock32 ; Go treat block specially...
682 subfic r9,r9,-4 ; Get the PCA entry offset
683 bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
684 add r7,r9,r29 ; Point to the PCA slot
686 bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)
688 lwz r21,mpPte(r31) ; Get the quick pointer again
689 lwz r5,0(r26) ; Get the top of PTE
691 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
692 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
693 rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
694 stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
695 beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...
697 stw r5,0(r26) ; Invalidate the PTE
699 li r9,tlbieLock ; Get the TLBIE lock
701 sync ; Make sure the invalid PTE is actually in memory
703 hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
704 mr. r5,r5 ; Is it locked?
705 li r5,1 ; Get locked indicator
706 bne- hrmPtlb32 ; It is locked, go spin...
707 stwcx. r5,0,r9 ; Try to get it
708 bne- hrmPtlb32 ; We was beat...
710 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
712 tlbie r30 ; Invalidate all corresponding TLB entries
714 beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....
716 eieio ; Make sure that the tlbie happens first
717 tlbsync ; Wait for everyone to catch up
718 sync ; Make sure of it all
720 hrmNTlbs: li r0,0 ; Clear this
721 rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
722 stw r0,tlbieLock(0) ; Clear the tlbie lock
723 lis r0,0x8000 ; Get bit for slot 0
724 eieio ; Make sure those RC bits have been stashed in PTE
726 srw r0,r0,r2 ; Get the allocation hash mask
727 lwz r22,4(r26) ; Get the latest reference and change bits
728 or r6,r6,r0 ; Show that this slot is free
731 eieio ; Make sure all updates come first
732 stw r6,0(r7) ; Unlock the PTEG
735 ; Now, it is time to remove the mapping and unlock the chain.
736 ; But first, we need to make sure no one else is using this
737 ; mapping so we drain the busy now
740 hrmPysDQ32: mr r3,r31 ; Point to the mapping
741 bl mapDrainBusy ; Go wait until mapping is unused
743 mr r3,r28 ; Get the pmap to remove from
744 mr r4,r31 ; Point to the mapping
745 bl EXT(mapRemove) ; Remove the mapping from the list
747 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
748 rlwinm r0,r20,0,mpType ; Isolate mapping type
749 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
750 la r3,pmapSXlk(r28) ; Point to the pmap search lock
751 subi r4,r4,1 ; Drop down the mapped page count
752 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
753 bl sxlkUnlock ; Unlock the search list
755 bf-- cr1_lt,hrmRetn32 ; This one has no real memory associated with it so we are done...
757 bl mapPhysFindLock ; Go find and lock the physent
759 lwz r9,ppLink+4(r3) ; Get first mapping
761 mr r4,r22 ; Get the RC bits we just got
762 bl mapPhysMerge ; Go merge the RC bits
764 rlwinm r9,r9,0,~ppFlags ; Clear the flags from the mapping pointer
766 cmplw r9,r31 ; Are we the first on the list?
767 bne- hrmNot1st ; Nope...
770 lwz r4,mpAlias+4(r31) ; Get our new forward pointer
771 stw r9,mpAlias+4(r31) ; Make sure we are off the chain
772 bl mapPhyCSet32 ; Go set the physent link and preserve flags
774 b hrmPhyDQd ; Join up and unlock it all...
778 hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
779 and r8,r8,r31 ; Get back to a page
780 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
782 la r3,pmapSXlk(r28) ; Point to the pmap search lock
783 bl sxlkUnlock ; Unlock the search list
785 xor r3,r31,r8 ; Flip mapping address to virtual
786 ori r3,r3,mapRtPerm ; Set permanent mapping error
789 hrmBadLock: li r3,mapRtBadLk ; Set bad lock
793 la r3,pmapSXlk(r28) ; Point to the pmap search lock
794 bl sxlkUnlock ; Unlock the search list
797 mr r3,r31 ; Point to the mapping
798 bl mapDropBusy ; Drop the busy here since we need to come back
799 li r3,mapRtRemove ; Say we are still removing this
805 la r3,pmapSXlk(r28) ; Point to the pmap search lock
806 bl sxlkUnlock ; Unlock the search list
807 li r3,mapRtNotFnd ; No mapping found
809 hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)
811 mtmsr r17 ; Restore enables/translation/etc.
813 b hrmRetnCmn ; Join the common return code...
815 hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
817 b hrmRetnCmn ; Join the common return code...
821 hrmNot1st: mr. r8,r9 ; Remember and test current node
822 beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
823 lwz r9,mpAlias+4(r9) ; Chain to the next
824 cmplw r9,r31 ; Is this us?
825 bne- hrmNot1st ; Not us...
827 lwz r9,mpAlias+4(r9) ; Get our forward pointer
828 stw r9,mpAlias+4(r8) ; Unchain us
832 hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain
834 hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
835 mr r3,r31 ; Copy the pointer to the mapping
836 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
837 bl mapDrainBusy ; Go wait until mapping is unused
839 xor r3,r31,r8 ; Flip mapping address to virtual
841 mtmsr r17 ; Restore enables/translation/etc.
844 hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
845 lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
846 lwz r17,FM_ARG0+0x08(r1) ; Restore a register
847 lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
848 mr. r6,r6 ; Should we pass back the "next" vaddr?
849 lwz r19,FM_ARG0+0x10(r1) ; Restore a register
850 lwz r20,FM_ARG0+0x14(r1) ; Restore a register
851 mtlr r0 ; Restore the return
853 rlwinm r16,r16,0,0,19 ; Clean to a page boundary
854 beq hrmNoNextAdr ; Do not pass back the next vaddr...
855 stw r15,0(r6) ; Pass back the top of the next vaddr
856 stw r16,4(r6) ; Pass back the bottom of the next vaddr
859 lwz r15,FM_ARG0+0x00(r1) ; Restore a register
860 lwz r16,FM_ARG0+0x04(r1) ; Restore a register
861 lwz r21,FM_ARG0+0x18(r1) ; Restore a register
862 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
863 lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
864 lwz r23,FM_ARG0+0x20(r1) ; Restore a register
865 lwz r24,FM_ARG0+0x24(r1) ; Restore a register
866 lwz r25,FM_ARG0+0x28(r1) ; Restore a register
867 lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
868 lwz r27,FM_ARG0+0x30(r1) ; Restore a register
869 lwz r28,FM_ARG0+0x34(r1) ; Restore a register
870 lwz r29,FM_ARG0+0x38(r1) ; Restore a register
871 lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
872 lwz r31,FM_ARG0+0x40(r1) ; Restore a register
873 lwz r1,0(r1) ; Pop the stack
877 ; Here is where we come when all is lost. Somehow, we failed a mapping function
878 ; that must work... All hope is gone. Alas, we die.......
881 hrmPanic: lis r0,hi16(Choke) ; System abend
882 ori r0,r0,lo16(Choke) ; System abend
883 li r3,failMapping ; Show that we failed some kind of mapping thing
888 ; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
889 ; in the range. Then, if we did not finish, return a code indicating that we need to
890 ; be called again. Eventually, we will finish and then, we will do a TLBIE for each
891 ; PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
893 ; A potential speed up is that we stop the invalidate loop once we have walked through
894 ; the hash table once. This really is not worth the trouble because we need to have
895 ; mapped 1/2 of physical RAM in an individual block. Way unlikely.
897 ; We should rethink this and see if we think it will be faster to check PTE and
898 ; only invalidate the specific PTE rather than all block map PTEs in the PTEG.
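/*
 *    A C sketch of the chunked teardown described above: each call invalidates at
 *    most one chunk of autogen PTEs and returns mapRtRemove until the final chunk,
 *    which also purges the TLB (never more entries than the TLB holds).  The
 *    constants, field names, and helpers are stand-ins modeled on mapRemChunk,
 *    mpBlkRemCur, and the code below.
 *
 *    #include <stdint.h>
 *
 *    #define MAP_REM_CHUNK 64            // pages invalidated per call (stand-in value)
 *    #define TLB_ENTRIES   64            // 32-bit parts: purge at most the whole TLB
 *    #define MAP_RT_REMOVE 1             // stand-in for mapRtRemove
 *    #define MP_RIP        0x80          // stand-in for the remove-in-progress flag
 *
 *    typedef struct blk { uint32_t flags, blk_rem_cur, pages; uint64_t vaddr; } blk_t;
 *    extern void invalidate_autogen_ptes(blk_t *bm, uint32_t first, uint32_t count);
 *    extern void tlbie_range(uint64_t vaddr, uint32_t pages);
 *
 *    int hrm_block_sketch(blk_t *bm)
 *    {
 *        uint32_t cursor = bm->blk_rem_cur;                   // where the previous call stopped
 *        uint32_t left   = bm->pages - cursor;
 *        uint32_t n      = left < MAP_REM_CHUNK ? left : MAP_REM_CHUNK;
 *
 *        bm->flags |= MP_RIP;                                 // mark remove-in-progress
 *        bm->blk_rem_cur = cursor + MAP_REM_CHUNK;            // bump the chunk cursor
 *        invalidate_autogen_ptes(bm, cursor, n);              // kill the PTEs, no tlbie yet
 *
 *        if (cursor + n < bm->pages)
 *            return MAP_RT_REMOVE;                            // force higher level to call again
 *
 *        uint32_t purges = bm->pages < TLB_ENTRIES ? bm->pages : TLB_ENTRIES;
 *        tlbie_range(bm->vaddr, purges);                      // final chunk: purge the TLB
 *        return 0;                                            // go dequeue the mapping itself
 *    }
 */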
903 hrmBlock32: lis r29,0xD000 ; Get shift to 32MB bsu
904 rlwinm r24,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
905 lhz r25,mpBSize(r31) ; Get the number of pages in block
906 lhz r23,mpSpace(r31) ; Get the address space hash
907 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
908 rlwnm r29,r29,r24,28,31 ; Rotate to get 0 or 13
909 addi r25,r25,1 ; Account for zero-based counting
910 ori r0,r20,mpRIP ; Turn on the remove in progress flag
911 slw r25,r25,r29 ; Adjust for 32MB if needed
912 mfsdr1 r29 ; Get the hash table base and size
913 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
914 subi r25,r25,1 ; Convert back to zero-based counting
915 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
916 sub r4,r25,r9 ; Get number of pages left
917 cmplw cr1,r9,r25 ; Have we already hit the end?
918 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
919 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
920 rlwinm r26,r29,16,7,15 ; Get the hash table size
921 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
922 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
923 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
924 cmpwi cr7,r2,0 ; Remember if we have finished
925 slwi r0,r9,12 ; Make cursor into page offset
926 or r24,r24,r23 ; Get full hash
927 and r4,r4,r2 ; If more than a chunk, bring this back to 0
928 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
929 add r27,r27,r0 ; Adjust vaddr to start of current chunk
930 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
932 bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...
934 la r3,pmapSXlk(r28) ; Point to the pmap search lock
935 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
936 bl sxlkUnlock ; Unlock the search list while we are invalidating
938 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
939 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
940 xor r24,r24,r8 ; Get the proper VSID
941 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
942 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
943 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
944 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
945 add r22,r22,r30 ; Get end address (in PTEG units)
947 hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
948 xor r23,r23,r24 ; Hash it
949 and r23,r23,r26 ; Wrap it into the table
950 rlwinm r3,r23,28,4,29 ; Change to PCA offset
951 subfic r3,r3,-4 ; Get the PCA entry offset
952 add r7,r3,r29 ; Point to the PCA slot
953 cmplw cr5,r30,r22 ; Check if we reached the end of the range
954 addi r30,r30,64 ; bump to the next vaddr
956 bl mapLockPteg ; Lock the PTEG
958 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
959 add r5,r23,r29 ; Point to the PTEG
960 li r0,0 ; Set an invalid PTE value
961 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
962 mtcrf 0x80,r4 ; Set CRs to select PTE slots
963 mtcrf 0x40,r4 ; Set CRs to select PTE slots
965 bf 0,hrmSlot0 ; No autogen here
966 stw r0,0x00(r5) ; Invalidate PTE
968 hrmSlot0: bf 1,hrmSlot1 ; No autogen here
969 stw r0,0x08(r5) ; Invalidate PTE
971 hrmSlot1: bf 2,hrmSlot2 ; No autogen here
972 stw r0,0x10(r5) ; Invalidate PTE
974 hrmSlot2: bf 3,hrmSlot3 ; No autogen here
975 stw r0,0x18(r5) ; Invalidate PTE
977 hrmSlot3: bf 4,hrmSlot4 ; No autogen here
978 stw r0,0x20(r5) ; Invalidate PTE
980 hrmSlot4: bf 5,hrmSlot5 ; No autogen here
981 stw r0,0x28(r5) ; Invalidate PTE
983 hrmSlot5: bf 6,hrmSlot6 ; No autogen here
984 stw r0,0x30(r5) ; Invalidate PTE
986 hrmSlot6: bf 7,hrmSlot7 ; No autogen here
987 stw r0,0x38(r5) ; Invalidate PTE
989 hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
990 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
991 andc r6,r6,r0 ; Turn off all the old autogen bits
993 hrmBNone32: eieio ; Make sure all updates come first
995 stw r6,0(r7) ; Unlock and set the PCA
997 bne+ cr5,hrmBInv32 ; Go invalidate the next...
999 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1001 mr r3,r31 ; Copy the pointer to the mapping
1002 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1004 sync ; Make sure memory is consistent
1006 subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
1007 li r6,63 ; Assume full invalidate for now
1008 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1009 andc r6,r6,r5 ; Clear max if we have less to do
1010 and r5,r25,r5 ; Clear count if we have more than max
1011 lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
1012 li r7,tlbieLock ; Get the TLBIE lock
1013 or r5,r5,r6 ; Get number of TLBIEs needed
1015 hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
1016 mr. r2,r2 ; Is it locked?
1017 li r2,1 ; Get our lock value
1018 bne- hrmBTLBlck ; It is locked, go wait...
1019 stwcx. r2,0,r7 ; Try to get it
1020 bne- hrmBTLBlck ; We was beat...
1022 hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
1023 tlbie r27 ; Invalidate it everywhere
1024 addi r27,r27,0x1000 ; Up to the next page
1025 bge+ hrmBTLBi ; Make sure we have done it all...
1027 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
1028 li r2,0 ; Lock clear value
1030 sync ; Make sure all is quiet
1031 beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....
1033 eieio ; Make sure that the tlbie happens first
1034 tlbsync ; Wait for everyone to catch up
1035 sync ; Wait for quiet again
1037 hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock
1039 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1040 bl sxlkShared ; Go get a shared lock on the mapping lists
1041 mr. r3,r3 ; Did we get the lock?
1042 bne- hrmPanic ; Nope...
1044 lwz r4,mpVAddr(r31) ; High order of address
1045 lwz r5,mpVAddr+4(r31) ; Low order of address
1046 mr r3,r28 ; Pass in pmap to search
1047 mr r29,r4 ; Save this in case we need it (only promote fails)
1048 mr r30,r5 ; Save this in case we need it (only promote fails)
1049 bl EXT(mapSearchFull) ; Go see if we can find it
1051 mr. r3,r3 ; Did we? (And remember mapping address for later)
1052 mr r15,r4 ; Save top of next vaddr
1053 mr r16,r5 ; Save bottom of next vaddr
1054 beq- hrmPanic ; Nope, not found...
1056 cmplw r3,r31 ; Same mapping?
1057 bne- hrmPanic ; Not good...
1059 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1060 bl sxlkPromote ; Try to promote shared to exclusive
1061 mr. r3,r3 ; Could we?
1062 mr r3,r31 ; Restore the mapping pointer
1063 beq+ hrmBDone1 ; Yeah...
1065 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1066 bl sxlkConvert ; Convert shared to exclusive
1067 mr. r3,r3 ; Could we?
1068 bne-- hrmPanic ; Nope, we must have timed out...
1070 mr r3,r28 ; Pass in pmap to search
1071 mr r4,r29 ; High order of address
1072 mr r5,r30 ; Low order of address
1073 bl EXT(mapSearchFull) ; Rescan the list
1075 mr. r3,r3 ; Did we lose it when we converted?
1076 mr r15,r4 ; Save top of next vaddr
1077 mr r16,r5 ; Save bottom of next vaddr
1078 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1080 hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused
1082 mr r3,r28 ; Get the pmap to remove from
1083 mr r4,r31 ; Point to the mapping
1084 bl EXT(mapRemove) ; Remove the mapping from the list
1086 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1087 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1088 subi r4,r4,1 ; Drop down the mapped page count
1089 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1090 bl sxlkUnlock ; Unlock the search list
1092 b hrmRetn32 ; We are all done, get out...
1095 ; Here we handle the 64-bit version of hw_rem_map
1100 hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
1101 beq-- cr5,hrmBlock64 ; Go treat block specially...
1102 subfic r9,r9,-4 ; Get the PCA entry offset
1103 bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
1104 add r7,r9,r29 ; Point to the PCA slot
1106 bl mapLockPteg ; Go lock up the PTEG
1108 lwz r21,mpPte(r31) ; Get the quick pointer again
1109 ld r5,0(r26) ; Get the top of PTE
1111 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
1112 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
1113 sldi r23,r5,16 ; Shift AVPN up to EA format
1114 // **** Need to adjust above shift based on the page size - large pages need to shift a bit more
1115 rldicr r5,r5,0,62 ; Clear the valid bit
1116 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1117 stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
1118 beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...
1120 std r5,0(r26) ; Invalidate the PTE
1122 li r9,tlbieLock ; Get the TLBIE lock
1124 sync ; Make sure the invalid PTE is actually in memory
1126 hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
1127 rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
1128 mr. r5,r5 ; Is it locked?
1129 li r5,1 ; Get locked indicator
1130 bne-- hrmPtlb64w ; It is locked, go spin...
1131 stwcx. r5,0,r9 ; Try to get it
1132 bne-- hrmPtlb64 ; We was beat...
1134 tlbie r23 ; Invalidate all corresponding TLB entries
1136 eieio ; Make sure that the tlbie happens first
1137 tlbsync ; Wait for everyone to catch up
1139 ptesync ; Make sure of it all
1140 li r0,0 ; Clear this
1141 rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
1142 stw r0,tlbieLock(0) ; Clear the tlbie lock
1143 oris r0,r0,0x8000 ; Assume slot 0
1145 srw r0,r0,r2 ; Get slot mask to deallocate
1147 lwz r22,12(r26) ; Get the latest reference and change bits
1148 or r6,r6,r0 ; Make the guy we killed free
1151 eieio ; Make sure all updates come first
1153 stw r6,0(r7) ; Unlock and change the PCA
1155 hrmPysDQ64: mr r3,r31 ; Point to the mapping
1156 bl mapDrainBusy ; Go wait until mapping is unused
1158 mr r3,r28 ; Get the pmap to remove from
1159 mr r4,r31 ; Point to the mapping
1160 bl EXT(mapRemove) ; Remove the mapping from the list
1162 rlwinm r0,r20,0,mpType ; Isolate mapping type
1163 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
1164 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1165 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1166 subi r4,r4,1 ; Drop down the mapped page count
1167 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1168 bl sxlkUnlock ; Unlock the search list
1170 bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done...
1172 bl mapPhysFindLock ; Go find and lock the physent
1174 li r0,ppLFAmask ; Get mask to clean up mapping pointer
1175 ld r9,ppLink(r3) ; Get first mapping
1176 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1177 mr r4,r22 ; Get the RC bits we just got
1179 bl mapPhysMerge ; Go merge the RC bits
1181 andc r9,r9,r0 ; Clean up the mapping pointer
1183 cmpld r9,r31 ; Are we the first on the list?
1184 bne-- hrmNot1st64 ; Nope...
1187 ld r4,mpAlias(r31) ; Get our forward pointer
1189 std r9,mpAlias(r31) ; Make sure we are off the chain
1190 bl mapPhyCSet64 ; Go set the physent link and preserve flags
1192 b hrmPhyDQd64 ; Join up and unlock it all...
1194 hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1195 stwcx. r5,0,r5 ; Clear the pending reservation
1198 hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1199 mr. r5,r5 ; is it locked?
1200 beq++ hrmPtlb64 ; Nope...
1201 b hrmPtlb64x ; Sniff some more...
1206 mr. r8,r9 ; Remember and test current node
1207 beq-- hrmPhyDQd64 ; Could not find our node...
1208 ld r9,mpAlias(r9) ; Chain to the next
1209 cmpld r9,r31 ; Is this us?
1210 bne-- hrmNot1st64 ; Not us...
1212 ld r9,mpAlias(r9) ; Get our forward pointer
1213 std r9,mpAlias(r8) ; Unchain us
1218 bl mapPhysUnlock ; Unlock the physent chain
1220 hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1221 mr r3,r31 ; Copy the pointer to the mapping
1222 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1223 bl mapDrainBusy ; Go wait until mapping is unused
1225 xor r3,r31,r8 ; Flip mapping address to virtual
1227 mtmsrd r17 ; Restore enables/translation/etc.
1230 b hrmRetnCmn ; Join the common return path...
1234 ; Check hrmBlock32 for comments.
1239 hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1240 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
1241 lhz r24,mpSpace(r31) ; Get the address space hash
1242 lhz r25,mpBSize(r31) ; Get the number of pages in block
1243 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
1244 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1245 addi r25,r25,1 ; Account for zero-based counting
1246 ori r0,r20,mpRIP ; Turn on the remove in progress flag
1247 slw r25,r25,r29 ; Adjust for 32MB if needed
1248 mfsdr1 r29 ; Get the hash table base and size
1249 ld r27,mpVAddr(r31) ; Get the base vaddr
1250 subi r25,r25,1 ; Convert back to zero-based counting
1251 rlwinm r5,r29,0,27,31 ; Isolate the size
1252 sub r4,r25,r9 ; Get number of pages left
1253 cmplw cr1,r9,r25 ; Have we already hit the end?
1254 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1255 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1256 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1257 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1258 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1259 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1260 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1261 srdi r27,r27,12 ; Change address into page index
1262 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1263 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1265 bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...
1267 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1268 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1269 bl sxlkUnlock ; Unlock the search list while we are invalidating
1271 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1272 eqv r26,r26,r26 ; Get all foxes here
1273 rldimi r24,r24,28,8 ; Make a couple copies up higher
1274 rldicr r29,r29,0,47 ; Isolate just the hash table base
1275 subfic r5,r5,46 ; Get number of leading zeros
1276 srd r26,r26,r5 ; Shift the size bits over
1277 mr r30,r27 ; Get start of chunk to invalidate
1278 rldicr r26,r26,0,56 ; Make length in PTEG units
1279 add r22,r4,r30 ; Get end page number
1281 hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1282 rldicr r0,r0,0,49 ; Clean all but segment portion
1283 rlwinm r2,r30,0,16,31 ; Get the current page index
1284 xor r0,r0,r24 ; Form VSID
1285 xor r8,r2,r0 ; Hash the vaddr
1286 sldi r8,r8,7 ; Make into PTEG offset
1287 and r23,r8,r26 ; Wrap into the hash table
1288 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1289 subfic r3,r3,-4 ; Get the PCA entry offset
1290 add r7,r3,r29 ; Point to the PCA slot
1292 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1294 bl mapLockPteg ; Lock the PTEG
1296 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1297 add r5,r23,r29 ; Point to the PTEG
1298 li r0,0 ; Set an invalid PTE value
1299 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1300 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1301 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1304 bf 0,hrmSlot0s ; No autogen here
1305 std r0,0x00(r5) ; Invalidate PTE
1307 hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1308 std r0,0x10(r5) ; Invalidate PTE
1310 hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1311 std r0,0x20(r5) ; Invalidate PTE
1313 hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1314 std r0,0x30(r5) ; Invalidate PTE
1316 hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1317 std r0,0x40(r5) ; Invalidate PTE
1319 hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1320 std r0,0x50(r5) ; Invalidate PTE
1322 hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1323 std r0,0x60(r5) ; Invalidate PTE
1325 hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1326 std r0,0x70(r5) ; Invalidate PTE
1328 hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
1329 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
1330 andc r6,r6,r0 ; Turn off all the old autogen bits
1332 hrmBNone64: eieio ; Make sure all updates come first
1333 stw r6,0(r7) ; Unlock and set the PCA
1335 addi r30,r30,1 ; bump to the next PTEG
1336 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1338 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1340 mr r3,r31 ; Copy the pointer to the mapping
1341 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1343 sync ; Make sure memory is consistent
1345 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1346 li r6,255 ; Assume full invalidate for now
1347 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1348 andc r6,r6,r5 ; Clear max if we have less to do
1349 and r5,r25,r5 ; Clear count if we have more than max
1350 sldi r24,r24,28 ; Get the full XOR value over to segment position
1351 ld r27,mpVAddr(r31) ; Get the base vaddr
1352 li r7,tlbieLock ; Get the TLBIE lock
1353 or r5,r5,r6 ; Get number of TLBIEs needed
1355 hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1356 mr. r2,r2 ; Is it locked?
1357 li r2,1 ; Get our lock value
1358 bne-- hrmBTLBlcm ; It is locked, go wait...
1359 stwcx. r2,0,r7 ; Try to get it
1360 bne-- hrmBTLBlcl ; We was beat...
1362 hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1363 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1364 addic. r5,r5,-1 ; See if we did them all
1365 xor r2,r2,r24 ; Make the VSID
1366 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1367 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1369 tlbie r2 ; Invalidate it everywhere
1370 addi r27,r27,0x1000 ; Up to the next page
1371 bge++ hrmBTLBj ; Make sure we have done it all...
1373 eieio ; Make sure that the tlbie happens first
1374 tlbsync ; wait for everyone to catch up
1376 li r2,0 ; Lock clear value
1378 ptesync ; Wait for quiet again
1380 stw r2,tlbieLock(0) ; Clear the tlbie lock
1382 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1383 bl sxlkShared ; Go get a shared lock on the mapping lists
1384 mr. r3,r3 ; Did we get the lock?
1385 bne- hrmPanic ; Nope...
1387 lwz r4,mpVAddr(r31) ; High order of address
1388 lwz r5,mpVAddr+4(r31) ; Low order of address
1389 mr r3,r28 ; Pass in pmap to search
1390 mr r29,r4 ; Save this in case we need it (only promote fails)
1391 mr r30,r5 ; Save this in case we need it (only promote fails)
1392 bl EXT(mapSearchFull) ; Go see if we can find it
1394 mr. r3,r3 ; Did we? (And remember mapping address for later)
1395 mr r15,r4 ; Save top of next vaddr
1396 mr r16,r5 ; Save bottom of next vaddr
1397 beq- hrmPanic ; Nope, not found...
1399 cmpld r3,r31 ; Same mapping?
1400 bne- hrmPanic ; Not good...
1402 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1403 bl sxlkPromote ; Try to promote shared to exclusive
1404 mr. r3,r3 ; Could we?
1405 mr r3,r31 ; Restore the mapping pointer
1406 beq+ hrmBDone2 ; Yeah...
1408 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1409 bl sxlkConvert ; Convert shared to exclusive
1410 mr. r3,r3 ; Could we?
1411 bne-- hrmPanic ; Nope, we must have timed out...
1413 mr r3,r28 ; Pass in pmap to search
1414 mr r4,r29 ; High order of address
1415 mr r5,r30 ; Low order of address
1416 bl EXT(mapSearchFull) ; Rescan the list
1418 mr. r3,r3 ; Did we lose it when we converted?
1419 mr r15,r4 ; Save top of next vaddr
1420 mr r16,r5 ; Save bottom of next vaddr
1421 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1423 hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1425 mr r3,r28 ; Get the pmap to remove from
1426 mr r4,r31 ; Point to the mapping
1427 bl EXT(mapRemove) ; Remove the mapping from the list
1429 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1430 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1431 subi r4,r4,1 ; Drop down the mapped page count
1432 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1433 bl sxlkUnlock ; Unlock the search list
1435 b hrmRetn64 ; We are all done, get out...
1437 hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1438 stwcx. r2,0,r2 ; Unreserve it
1440 hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1441 mr. r2,r2 ; Is it held?
1442 beq++ hrmBTLBlcl ; Nope...
1443 b hrmBTLBlcn ; Yeah...
1446 ; Guest shadow assist -- mapping remove
1448 ; Method of operation (sketched in C after the register lists below):
1449 ; o Locate the VMM extension block and the host pmap
1450 ; o Obtain the host pmap's search lock exclusively
1451 ; o Locate the requested mapping in the shadow hash table, exiting if it is not found
1453 ; o If connected, disconnect the PTE and gather R&C to physent
1454 ; o Locate and lock the physent
1455 ; o Remove mapping from physent's chain
1457 ; o Unlock pmap's search lock
1459 ; Non-volatile registers on entry:
1460 ; r17: caller's msr image
1461 ; r19: sprg2 (feature flags)
1462 ; r28: guest pmap's physical address
1463 ; r29: high-order 32 bits of guest virtual address
1464 ; r30: low-order 32 bits of guest virtual address
1466 ; Non-volatile register usage:
1467 ; r26: VMM extension block's physical address
1468 ; r27: host pmap's physical address
1469 ; r28: guest pmap's physical address
1470 ; r29: physent's physical address
1471 ; r30: guest virtual address
1472 ; r31: guest mapping's physical address
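/*
 *    A C sketch of the shadow hash lookup described above.  The GV_* geometry,
 *    flag value, and field names are stand-ins patterned on the constants used
 *    below; the hash page / hash group selection (spaceID ^ (vaddr >> 12)) is
 *    summarized in a comment rather than spelled out.
 *
 *    #include <stdint.h>
 *    #include <stddef.h>
 *
 *    #define GV_SLOTS 8                   // mapping slots per hash group (assumed)
 *    #define MPG_FREE 0x80000000u         // stand-in for the mpgFree flag
 *
 *    typedef struct gv_slot { uint32_t flags; uint16_t space; uint64_t vaddr; } gv_slot_t;
 *
 *    // group points at the hash group selected by spaceID ^ (vaddr >> 12).
 *    gv_slot_t *shadow_lookup_sketch(gv_slot_t *group, uint16_t space_id, uint64_t va)
 *    {
 *        for (int i = 0; i < GV_SLOTS; i++) {
 *            gv_slot_t *s = &group[i];
 *            int in_use      = (s->flags & MPG_FREE) == 0;             // slot not free
 *            int space_match = (s->space == space_id);                 // same address space
 *            int va_match    = ((s->vaddr ^ va) & ~0xFFFULL) == 0;     // same page, flags ignored
 *            if (in_use && space_match && va_match)
 *                return s;                             // hit: this is the guest mapping
 *        }
 *        return NULL;                                  // miss: caller reports mapRtNotFnd
 *    }
 */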
1476 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1477 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1478 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1479 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1480 b hrmGStart ; Join common code
1482 hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1483 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1484 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1486 hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1487 bl sxlkExclusive ; Get lock exclusive
1489 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1491 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1492 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1493 srwi r11,r30,12 ; Form shadow hash:
1494 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1495 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1496 ; Form index offset from hash page number
1497 add r31,r31,r12 ; r31 <- hash page index entry
1498 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1499 mtctr r0 ; in this group
1500 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1501 lwz r31,4(r31) ; r31 <- hash page paddr
1502 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1503 ; r31 <- hash group paddr
1505 addi r3,r3,1 ; Increment remove request count
1506 stw r3,vxsGrm(r26) ; Update remove request count
1508 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1509 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1510 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1511 b hrmG32SrchLp ; Let the search begin!
1515 mr r6,r3 ; r6 <- current mapping slot's flags
1516 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1517 mr r7,r4 ; r7 <- current mapping slot's space ID
1518 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1519 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1520 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1521 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1522 xor r7,r7,r9 ; Compare space ID
1523 or r0,r11,r7 ; r0 <- free || space mismatch
1524 xor r8,r8,r30 ; Compare virtual address
1525 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1526 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1528 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1529 bdnz hrmG32SrchLp ; Iterate
1531 mr r6,r3 ; r6 <- current mapping slot's flags
1532 clrrwi r5,r5,12 ; Remove flags from virtual address
1533 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1534 xor r4,r4,r9 ; Compare space ID
1535 or r0,r11,r4 ; r0 <- free || space mismatch
1536 xor r5,r5,r30 ; Compare virtual address
1537 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1538 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1539 b hrmGSrchMiss ; No joy in our hash group
1542 ld r31,0(r31) ; r31 <- hash page paddr
1543 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1544 ; r31 <- hash group paddr
1545 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1546 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1547 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1548 b hrmG64SrchLp ; Let the search begin!
1552 mr r6,r3 ; r6 <- current mapping slot's flags
1553 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1554 mr r7,r4 ; r7 <- current mapping slot's space ID
1555 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1556 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1557 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1558 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1559 xor r7,r7,r9 ; Compare space ID
1560 or r0,r11,r7 ; r0 <- free || space mismatch
1561 xor r8,r8,r30 ; Compare virtual address
1562 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1563 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1565 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1566 bdnz hrmG64SrchLp ; Iterate
1568 mr r6,r3 ; r6 <- current mapping slot's flags
1569 clrrdi r5,r5,12 ; Remove flags from virtual address
1570 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1571 xor r4,r4,r9 ; Compare space ID
1572 or r0,r11,r4 ; r0 <- !(!free && space match)
1573 xor r5,r5,r30 ; Compare virtual address
1574 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1575 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1577 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1578 li r25,mapRtNotFnd ; Return not found
1579 addi r3,r3,1 ; Increment miss count
1580 stw r3,vxsGrmMiss(r26) ; Update miss count
1581 b hrmGReturn ; Join guest return
1585 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1586 bne hrmGDormant ; Yes, nothing to disconnect
1588 lwz r3,vxsGrmActive(r26) ; Get active hit count
1589 addi r3,r3,1 ; Increment active hit count
1590 stw r3,vxsGrmActive(r26) ; Update hit count
1592 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1593 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1594 ; r31 <- mapping's physical address
1595 ; r3 -> PTE slot physical address
1596 ; r4 -> High-order 32 bits of PTE
1597 ; r5 -> Low-order 32 bits of PTE
1599 ; r7 -> PCA physical address
1600 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1601 b hrmGFreePTE ; Join 64-bit path to release the PTE
1603 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1604 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1606 mr. r3,r3 ; Was there a valid PTE?
1607 beq hrmGDormant ; No valid PTE, we're almost done
1608 lis r0,0x8000 ; Prepare free bit for this slot
1609 srw r0,r0,r2 ; Position free bit
1610 or r6,r6,r0 ; Set it in our PCA image
1611 lwz r8,mpPte(r31) ; Get PTE offset
1612 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1613 stw r8,mpPte(r31) ; Save invalidated PTE offset
1614 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1615 stw r6,0(r7) ; Update PCA and unlock the PTEG
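;
;			What the block above does, sketched in C (illustrative; the PCA free
;			mask and field names are assumptions based on the comments here):
;
;				pca_image |= 0x80000000u >> pte_slot;   /* mark this PTE slot free           */
;				mp->mpPte &= ~mpHValid;                 /* mapping no longer points at a PTE */
;				eieio();                                /* order the updates ...             */
;				pca->word0 = pca_image;                 /* ... then unlock the PTEG          */
;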
1618 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1619 bl mapFindLockPN ; Find 'n' lock this page's physent
1620 mr. r29,r3 ; Got lock on our physent?
1621 beq-- hrmGBadPLock ; No, time to bail out
1623 crset cr1_eq ; cr1_eq <- previous link is the anchor
1624 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1625 la r11,ppLink+4(r29) ; Point to chain anchor
1626 lwz r9,ppLink+4(r29) ; Get chain anchor
1627 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1629 beq- hrmGPEMissMiss ; End of chain, this is not good
1630 cmplw r9,r31 ; Is this the mapping to remove?
1631 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1632 bne hrmGRemNext ; No, chain onward
1633 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1634 stw r8,0(r11) ; Unchain gpv->phys mapping
1635 b hrmGDelete ; Finish deleting mapping
1637 lwarx r0,0,r11 ; Get previous link
1638 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1639 stwcx. r0,0,r11 ; Update previous link
1640 bne- hrmGRemRetry ; Lost reservation, retry
1641 b hrmGDelete ; Finish deleting mapping
1644 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1645 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1646 mr. r9,r8 ; Does next entry exist?
1647 b hrmGRemLoop ; Carry on
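;
;			The removal loop above is an ordinary singly-linked-list unlink,
;			except that the chain anchor keeps flag bits in its low-order bits,
;			so an unlink at the anchor goes through lwarx/stwcx. to preserve
;			them.  Roughly, in C (illustrative only):
;
;				prev = &physent->ppLink;                       /* anchor; flags live here     */
;				for (m = *prev & ~ppFlags; m != 0; m = next) {
;					next = m->mpAlias;
;					if (m == victim) { *prev = keep_flags(*prev) | next; break; }
;					prev = &m->mpAlias;                        /* plain store is enough here  */
;				}
;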
1650 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1651 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1652 la r11,ppLink(r29) ; Point to chain anchor
1653 ld r9,ppLink(r29) ; Get chain anchor
1654 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1656 beq-- hrmGPEMissMiss ; End of chain, this is not good
1657 cmpld r9,r31 ; Is this the mapping to remove?
1658 ld r8,mpAlias(r9) ; Get forward chain pointer
1659 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1660 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1661 std r8,0(r11) ; Unchain gpv->phys mapping
1662 b hrmGDelete ; Finish deleting mapping
1664 ldarx r0,0,r11 ; Get previous link
1665 and r0,r0,r7 ; Get flags
1666 or r0,r0,r8 ; Insert new forward pointer
1667 stdcx. r0,0,r11 ; Slam it back in
1668 bne-- hrmGRem64Rt ; Lost reservation, retry
1669 b hrmGDelete ; Finish deleting mapping
1673 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1674 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1675 mr. r9,r8 ; Does next entry exist?
1676 b hrmGRem64Lp ; Carry on
1679 mr r3,r29 ; r3 <- physent addr
1680 bl mapPhysUnlock ; Unlock physent chain
1681 lwz r3,mpFlags(r31) ; Get mapping's flags
1682 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1683 ori r3,r3,mpgFree ; Mark mapping free
1684 stw r3,mpFlags(r31) ; Update flags
1685 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1688 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1689 bl sxlkUnlock ; Release host pmap search lock
1691 mr r3,r25 ; r3 <- return code
1692 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1693 mtmsr r17 ; Restore 'rupts, translation
1694 isync ; Throw a small wrench into the pipeline
1695 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1696 hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1697 b hrmRetnCmn ; Join common return
1701 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1702 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1703 li r3,failMapping ; All the way from New Orleans
1708 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1710 * Upon entry, R3 contains a pointer to a physent.
1712 * This function removes the first mapping from a physical entry
1713 * alias list. It locks the list, extracts the vaddr and pmap from
1714 * the first entry. It then jumps into the hw_rem_map function.
1715 * NOTE: since we jump into rem_map, we need to set up the stack
1716 * identically. Also, we set the next parm to 0 so we do not
1717 * try to save a next vaddr.
1719 * We return the virtual address of the removed mapping as a
1722 * Note that this is designed to be called from 32-bit mode with a stack.
1724 * We disable translation and all interruptions here. This keeps us
1725 * from having to worry about a deadlock due to having anything locked
1726 * and needing it to process a fault.
1728 * Note that this must be done with both interruptions off and VM off
1731 * Remove mapping via physical page (mapping_purge)
1734 * 2) extract vaddr and pmap
1736 * 4) do "remove mapping via pmap"
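;
;			The steps above, roughly in C (illustrative; pmapTrans is the space
;			hash -> pmap translation table referenced in the code below, and the
;			helper names are not real kernel functions):
;
;				lock_physent(pp);
;				mp = first_mapping(pp);                  /* head of the alias chain, flags stripped */
;				if (mp == 0) { unlock_physent(pp); return mapRtEmpty; }
;				space = mp->mpSpace;
;				va    = mp->mpVAddr;                     /* full 64-bit virtual address             */
;				pmap  = pmapTrans[space].pmapPAddr;      /* space hash -> pmap physical address     */
;				unlock_physent(pp);
;				/* fall into the common hw_rem_map path with (pmap, va, next == 0) */
;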
1742 .globl EXT(hw_purge_phys)
1745 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1746 mflr r0 ; Save the link register
1747 stw r15,FM_ARG0+0x00(r1) ; Save a register
1748 stw r16,FM_ARG0+0x04(r1) ; Save a register
1749 stw r17,FM_ARG0+0x08(r1) ; Save a register
1750 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1751 stw r19,FM_ARG0+0x10(r1) ; Save a register
1752 stw r20,FM_ARG0+0x14(r1) ; Save a register
1753 stw r21,FM_ARG0+0x18(r1) ; Save a register
1754 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1755 stw r23,FM_ARG0+0x20(r1) ; Save a register
1756 stw r24,FM_ARG0+0x24(r1) ; Save a register
1757 stw r25,FM_ARG0+0x28(r1) ; Save a register
1758 li r6,0 ; Set no next address return
1759 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1760 stw r27,FM_ARG0+0x30(r1) ; Save a register
1761 stw r28,FM_ARG0+0x34(r1) ; Save a register
1762 stw r29,FM_ARG0+0x38(r1) ; Save a register
1763 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1764 stw r31,FM_ARG0+0x40(r1) ; Save a register
1765 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1766 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1768 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1770 bl mapPhysLock ; Lock the physent
1772 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1774 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1775 li r0,ppFlags ; Set the bottom stuff to clear
1776 b hppJoin ; Join the common...
1778 hppSF: li r0,ppLFAmask
1779 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1780 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1782 hppJoin: andc. r12,r12,r0 ; Clean and test link
1783 beq-- hppNone ; There are no more mappings on physical page
1785 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1786 lhz r7,mpSpace(r12) ; Get the address space hash
1787 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1788 slwi r0,r7,2 ; Multiply space by 4
1789 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1790 slwi r7,r7,3 ; Multiply space by 8
1791 lwz r5,mpVAddr+4(r12) ; and the bottom
1792 add r7,r7,r0 ; Get correct displacement into translate table
1793 lwz r28,0(r28) ; Get the actual translation map
1795 add r28,r28,r7 ; Point to the pmap translation
1797 bl mapPhysUnlock ; Time to unlock the physical entry
1799 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1801 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1802 b hrmJoin ; Go remove the mapping...
1804 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1805 b hrmJoin ; Go remove the mapping...
1809 hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1811 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1813 mtmsr r11 ; Restore enables/translation/etc.
1815 b hppRetnCmn ; Join the common return code...
1817 hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1821 ; NOTE: we have not used any registers other than the volatiles to this point
1824 hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1826 li r3,mapRtEmpty ; Physent chain is empty
1827 mtlr r12 ; Restore the return
1828 lwz r1,0(r1) ; Pop the stack
1832 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1834 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1835 * a 64-bit quantity, it is a long long so it is in R4 and R5.
1837 * We return the virtual address of the removed mapping as a
1840 * Note that this is designed to be called from 32-bit mode with a stack.
1842 * We disable translation and all interruptions here. This keeps us
1843 * from having to worry about a deadlock due to having anything locked
1844 * and needing it to process a fault.
1846 * Note that this must be done with both interruptions off and VM off
1848 * Remove a mapping which can be reestablished by VM
1853 .globl EXT(hw_purge_map)
1856 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1857 mflr r0 ; Save the link register
1858 stw r15,FM_ARG0+0x00(r1) ; Save a register
1859 stw r16,FM_ARG0+0x04(r1) ; Save a register
1860 stw r17,FM_ARG0+0x08(r1) ; Save a register
1861 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1862 stw r19,FM_ARG0+0x10(r1) ; Save a register
1863 mfsprg r19,2 ; Get feature flags
1864 stw r20,FM_ARG0+0x14(r1) ; Save a register
1865 stw r21,FM_ARG0+0x18(r1) ; Save a register
1866 mtcrf 0x02,r19 ; move pf64Bit cr6
1867 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1868 stw r23,FM_ARG0+0x20(r1) ; Save a register
1869 stw r24,FM_ARG0+0x24(r1) ; Save a register
1870 stw r25,FM_ARG0+0x28(r1) ; Save a register
1871 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1872 stw r27,FM_ARG0+0x30(r1) ; Save a register
1873 stw r28,FM_ARG0+0x34(r1) ; Save a register
1874 stw r29,FM_ARG0+0x38(r1) ; Save a register
1875 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1876 stw r31,FM_ARG0+0x40(r1) ; Save a register
1877 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1878 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1881 lwz r11,pmapFlags(r3) ; Get pmaps flags
1882 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1883 bne hpmPanic ; Call not valid for guest shadow assist pmap
1886 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1887 lwz r9,pmapvr+4(r3) ; Get conversion mask
1890 hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1893 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1895 xor r28,r3,r9 ; Convert the pmap to physical addressing
1897 mr r17,r11 ; Save the MSR
1899 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1900 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1901 mr. r3,r3 ; Did we get the lock?
1902 bne-- hrmBadLock ; Nope...
1904 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
1905 ; here so that we will know the previous elements so we can dequeue them
1909 mr r3,r28 ; Pass in pmap to search
1910 mr r29,r4 ; Top half of vaddr
1911 mr r30,r5 ; Bottom half of vaddr
1912 bl EXT(mapSearchFull) ; Rescan the list
1913 mr. r31,r3 ; Did we? (And remember mapping address for later)
1914 or r0,r4,r5 ; Are we beyond the end?
1915 mr r15,r4 ; Save top of next vaddr
1916 cmplwi cr1,r0,0 ; See if there is another
1917 mr r16,r5 ; Save bottom of next vaddr
1918 bne-- hpmGotOne ; We found one, go check it out...
1920 hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1921 b hrmNotFound ; No more in pmap to check...
1923 hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1924 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
1925 rlwinm r21,r20,8,24,31 ; Extract the busy count
1926 cmplwi cr2,r21,0 ; Is it busy?
1927 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
1928 beq++ hrmGotX ; Found, branch to remove the mapping...
1929 b hpmCNext ; Nope...
1931 hpmPanic: lis r0,hi16(Choke) ; System abend
1932 ori r0,r0,lo16(Choke) ; System abend
1933 li r3,failMapping ; Show that we failed some kind of mapping thing
1937 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1939 * Upon entry, R3 contains a pointer to a pmap.
1940 * pa is a pointer to the physent
1942 * This function removes the first mapping for a specific pmap from a physical entry
1943 * alias list. It locks the list, extracts the vaddr and pmap from
1944 * the first appropriate entry. It then jumps into the hw_rem_map function.
1945 * NOTE: since we jump into rem_map, we need to set up the stack
1946 * identically. Also, we set the next parm to 0 so we do not
1947 * try to save a next vaddr.
1949 * We return the virtual address of the removed mapping as a
1952 * Note that this is designed to be called from 32-bit mode with a stack.
1954 * We disable translation and all interruptions here. This keeps us
1955 * from having to worry about a deadlock due to having anything locked
1956 * and needing it to process a fault.
1958 * Note that this must be done with both interruptions off and VM off
1961 * Remove mapping via physical page (mapping_purge)
1964 * 2) extract vaddr and pmap
1966 * 4) do "remove mapping via pmap"
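;
;			Sketch of the search below (illustrative C; helper names are not
;			real kernel functions):
;
;				lock_physent(pp);
;				for (mp = first_mapping(pp); mp != 0; mp = next_alias(mp))
;					if (mp->mpSpace == pmap->pmapSpace) break;   /* first mapping for this space */
;				if (mp == 0) { unlock_physent(pp); return mapRtEmpty; }
;				va = mp->mpVAddr;
;				unlock_physent(pp);
;				/* fall into the common hw_rem_map path with (pmap, va, next == 0) */
;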
1972 .globl EXT(hw_purge_space)
1974 LEXT(hw_purge_space)
1975 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1976 mflr r0 ; Save the link register
1977 stw r15,FM_ARG0+0x00(r1) ; Save a register
1978 stw r16,FM_ARG0+0x04(r1) ; Save a register
1979 stw r17,FM_ARG0+0x08(r1) ; Save a register
1980 mfsprg r2,2 ; Get feature flags
1981 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1982 stw r19,FM_ARG0+0x10(r1) ; Save a register
1983 stw r20,FM_ARG0+0x14(r1) ; Save a register
1984 stw r21,FM_ARG0+0x18(r1) ; Save a register
1985 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1986 mtcrf 0x02,r2 ; move pf64Bit cr6
1987 stw r23,FM_ARG0+0x20(r1) ; Save a register
1988 stw r24,FM_ARG0+0x24(r1) ; Save a register
1989 stw r25,FM_ARG0+0x28(r1) ; Save a register
1990 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1991 stw r27,FM_ARG0+0x30(r1) ; Save a register
1992 li r6,0 ; Set no next address return
1993 stw r28,FM_ARG0+0x34(r1) ; Save a register
1994 stw r29,FM_ARG0+0x38(r1) ; Save a register
1995 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1996 stw r31,FM_ARG0+0x40(r1) ; Save a register
1997 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1998 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2001 lwz r11,pmapFlags(r4) ; Get pmaps flags
2002 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2003 bne hpsPanic ; Call not valid for guest shadow assist pmap
2006 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
2008 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
2012 hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2014 hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2016 xor r4,r4,r9 ; Convert the pmap to physical addressing
2018 bl mapPhysLock ; Lock the physent
2020 lwz r8,pmapSpace(r4) ; Get the space hash
2022 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2024 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2026 hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2027 beq hpsNone ; Did not find one...
2029 lhz r10,mpSpace(r12) ; Get the space
2031 cmplw r10,r8 ; Is this one of ours?
2034 lwz r12,mpAlias+4(r12) ; Chain on to the next
2035 b hpsSrc32 ; Check it out...
2039 hpsSF: li r0,ppLFAmask
2040 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2041 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2043 hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2044 beq hpsNone ; Did not find one...
2046 lhz r10,mpSpace(r12) ; Get the space
2048 cmplw r10,r8 ; Is this one of ours?
2051 ld r12,mpAlias(r12) ; Chain on to the next
2052 b hpsSrc64 ; Check it out...
2056 hpsFnd: mr r28,r4 ; Set the pmap physical address
2057 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2058 lwz r5,mpVAddr+4(r12) ; and the bottom
2060 bl mapPhysUnlock ; Time to unlock the physical entry
2061 b hrmJoin ; Go remove the mapping...
2065 hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2067 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
2069 mtmsr r11 ; Restore enables/translation/etc.
2071 b hpsRetnCmn ; Join the common return code...
2073 hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2077 ; NOTE: we have not used any registers other than the volatiles to this point
2080 hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2082 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
2083 mtlr r12 ; Restore the return
2084 lwz r1,0(r1) ; Pop the stack
2087 hpsPanic: lis r0,hi16(Choke) ; System abend
2088 ori r0,r0,lo16(Choke) ; System abend
2089 li r3,failMapping ; Show that we failed some kind of mapping thing
2093 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2094 * on this physent chain
2096 * Locates the first guest mapping on the physent chain that is associated with the
2097 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2098 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2099 * repeatedly until no additional guest mappings that match our criteria are removed.
2101 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2104 * r3 : physent, 32-bit kernel virtual address
2105 * r4 : host pmap, 32-bit kernel virtual address
2107 * Volatile register usage (for linkage through hrmJoin):
2108 * r4 : high-order 32 bits of guest virtual address
2109 * r5 : low-order 32 bits of guest virtual address
2110 * r11: saved MSR image
2112 * Non-volatile register usage:
2113 * r26: VMM extension block's physical address
2114 * r27: host pmap's physical address
2115 * r28: guest pmap's physical address
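;
;			The per-mapping test in the scans below amounts to this C predicate
;			(illustrative; field names follow the comments in this file):
;
;				match = ((mp->mpFlags & mpType) == mpGuest)                 /* a guest mapping    */
;				     && (pmapTrans[mp->mpSpace].pmapPAddr->pmapVmmExtPhys
;				            == host_vmm_ext_phys);                          /* guest of this host */
;
;			Both tests are folded with xor/or. so that a single branch decides
;			whether to remove this mapping or chain to the next one.
;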
2120 .globl EXT(hw_scrub_guest)
2122 LEXT(hw_scrub_guest)
2123 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2124 mflr r0 ; Save the link register
2125 stw r15,FM_ARG0+0x00(r1) ; Save a register
2126 stw r16,FM_ARG0+0x04(r1) ; Save a register
2127 stw r17,FM_ARG0+0x08(r1) ; Save a register
2128 mfsprg r2,2 ; Get feature flags
2129 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2130 stw r19,FM_ARG0+0x10(r1) ; Save a register
2131 stw r20,FM_ARG0+0x14(r1) ; Save a register
2132 stw r21,FM_ARG0+0x18(r1) ; Save a register
2133 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2134 mtcrf 0x02,r2 ; move pf64Bit cr6
2135 stw r23,FM_ARG0+0x20(r1) ; Save a register
2136 stw r24,FM_ARG0+0x24(r1) ; Save a register
2137 stw r25,FM_ARG0+0x28(r1) ; Save a register
2138 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2139 stw r27,FM_ARG0+0x30(r1) ; Save a register
2140 li r6,0 ; Set no next address return
2141 stw r28,FM_ARG0+0x34(r1) ; Save a register
2142 stw r29,FM_ARG0+0x38(r1) ; Save a register
2143 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2144 stw r31,FM_ARG0+0x40(r1) ; Save a register
2145 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2146 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2148 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2150 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2151 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2152 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2153 b hsgStart ; Get to work
2155 hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2156 ld r9,pmapvr+4(r4) ; Get 64-bit virt<->real conversion salt
2158 hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2159 xor r27,r4,r9 ; Convert host pmap_t virt->real
2160 bl mapPhysLock ; Lock the physent
2162 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2164 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2165 hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2166 beq hsg32Miss ; Did not find one...
2167 lwz r8,mpFlags(r12) ; Get mapping's flags
2168 lhz r7,mpSpace(r12) ; Get mapping's space id
2169 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2170 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2171 xori r8,r8,mpGuest ; Is it a guest mapping?
2172 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2173 slwi r9,r7,2 ; Multiply space by 4
2174 lwz r28,0(r28) ; Get the actual translation map
2175 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2176 slwi r7,r7,3 ; Multiply space by 8
2177 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2178 add r7,r7,r9 ; Get correct displacement into translate table
2179 add r28,r28,r7 ; Point to the pmap translation
2180 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2181 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2182 xor r7,r7,r26 ; Is guest associated with specified host?
2183 or. r7,r7,r8 ; Guest mapping && associated with host?
2184 lwz r12,mpAlias+4(r12) ; Chain on to the next
2185 bne hsg32Loop ; Try next mapping on alias chain
2187 hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2188 b hrmJoin ; Join common path for mapping removal
2191 hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2192 mtmsr r11 ; Restore 'rupts, translation
2193 isync ; Throw a small wrench into the pipeline
2194 li r3,mapRtEmpty ; No mappings found matching specified criteria
2195 b hrmRetnCmn ; Exit through common epilog
2198 hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2199 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2200 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2201 hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2202 beq hsg64Miss ; Did not find one...
2203 lwz r8,mpFlags(r12) ; Get mapping's flags
2204 lhz r7,mpSpace(r12) ; Get mapping's space id
2205 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2206 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2207 xori r8,r8,mpGuest ; Is it a guest mapping?
2208 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2209 slwi r9,r7,2 ; Multiply space by 4
2210 lwz r28,0(r28) ; Get the actual translation map
2211 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2212 slwi r7,r7,3 ; Multiply space by 8
2213 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2214 add r7,r7,r9 ; Get correct displacement into translate table
2215 add r28,r28,r7 ; Point to the pmap translation
2216 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2217 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2218 xor r7,r7,r26 ; Is guest associated with specified host?
2219 or. r7,r7,r8 ; Guest mapping && associated with host?
2220 ld r12,mpAlias(r12) ; Chain on to the next
2221 bne hsg64Loop ; Try next mapping on alias chain
2223 hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2224 b hrmJoin ; Join common path for mapping removal
2227 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
2228 mtmsrd r11 ; Restore 'rupts, translation
2229 li r3,mapRtEmpty ; No mappings found matching specified criteria
2230 b hrmRetnCmn ; Exit through common epilog
2234 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2236 * Upon entry, R3 contains a pointer to a physent.
2237 * space is the space ID from the pmap in question
2239 * We return the virtual address of the found mapping in
2240 * R3. Note that the mapping busy is bumped.
2242 * Note that this is designed to be called from 32-bit mode with a stack.
2244 * We disable translation and all interruptions here. This keeps us
2245 * from having to worry about a deadlock due to having anything locked
2246 * and needing it to process a fault.
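;
;			Roughly, in C (illustrative; mbvrswap is the per-page value the code
;			below uses to convert a mapping's physical address back to virtual):
;
;				lock_physent(pp);
;				for (mp = first_mapping(pp); mp != 0; mp = next_alias(mp))
;					if (mp->mpSpace == space) { bump_busy(mp); break; }
;				unlock_physent(pp);
;				return mp ? phys_to_virt(mp) : 0;        /* caller gets a kernel virtual address */
;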
2251 .globl EXT(hw_find_space)
2254 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2255 mflr r0 ; Save the link register
2256 mr r8,r4 ; Remember the space
2257 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2259 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2261 bl mapPhysLock ; Lock the physent
2263 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2265 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2267 hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2268 beq hfsNone ; Did not find one...
2270 lhz r10,mpSpace(r12) ; Get the space
2272 cmplw r10,r8 ; Is this one of ours?
2275 lwz r12,mpAlias+4(r12) ; Chain on to the next
2276 b hfsSrc32 ; Check it out...
2280 hfsSF: li r0,ppLFAmask
2281 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2282 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2284 hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2285 beq hfsNone ; Did not find one...
2287 lhz r10,mpSpace(r12) ; Get the space
2289 cmplw r10,r8 ; Is this one of ours?
2292 ld r12,mpAlias(r12) ; Chain on to the next
2293 b hfsSrc64 ; Check it out...
2297 hfsFnd: mr r8,r3 ; Save the physent
2298 mr r3,r12 ; Point to the mapping
2299 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2301 mr r3,r8 ; Get back the physical entry
2302 li r7,0xFFF ; Get a page size mask
2303 bl mapPhysUnlock ; Time to unlock the physical entry
2305 andc r3,r12,r7 ; Move the mapping back down to a page
2306 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2307 xor r12,r3,r12 ; Convert to virtual
2308 b hfsRet ; Time to return
2312 hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2314 hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
2316 mtmsr r11 ; Restore enables/translation/etc.
2318 b hfsRetnCmn ; Join the common return code...
2320 hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2324 ; NOTE: we have not used any registers other than the volatiles to this point
2327 hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
2330 mr. r3,r3 ; Anything to return?
2331 beq hfsRetnNull ; Nope
2332 lwz r11,mpFlags(r3) ; Get mapping flags
2333 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2334 cmplwi r0,mpGuest ; Shadow guest mapping?
2335 beq hfsPanic ; Yup, kick the bucket
2339 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2341 mtlr r12 ; Restore the return
2342 lwz r1,0(r1) ; Pop the stack
2345 hfsPanic: lis r0,hi16(Choke) ; System abend
2346 ori r0,r0,lo16(Choke) ; System abend
2347 li r3,failMapping ; Show that we failed some kind of mapping thing
2351 ; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2352 ; Returns 0 if not found or the virtual address of the mapping if
2353 ; it is. Also, the mapping has the busy count bumped.
2356 .globl EXT(hw_find_map)
2359 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2360 mflr r0 ; Save the link register
2361 stw r25,FM_ARG0+0x00(r1) ; Save a register
2362 stw r26,FM_ARG0+0x04(r1) ; Save a register
2363 mr r25,r6 ; Remember address of next va
2364 stw r27,FM_ARG0+0x08(r1) ; Save a register
2365 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2366 stw r29,FM_ARG0+0x10(r1) ; Save a register
2367 stw r30,FM_ARG0+0x14(r1) ; Save a register
2368 stw r31,FM_ARG0+0x18(r1) ; Save a register
2369 stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2372 lwz r11,pmapFlags(r3) ; Get pmaps flags
2373 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2374 bne hfmPanic ; Call not valid for guest shadow assist pmap
2377 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2378 lwz r7,pmapvr+4(r3) ; Get the second part
2381 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2383 mr r27,r11 ; Remember the old MSR
2384 mr r26,r12 ; Remember the feature bits
2386 xor r28,r3,r7 ; Change the common 32- and 64-bit half
2388 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
2390 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2392 hfmSF1: mr r29,r4 ; Save top half of vaddr
2393 mr r30,r5 ; Save the bottom half
2395 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2396 bl sxlkShared ; Go get a shared lock on the mapping lists
2397 mr. r3,r3 ; Did we get the lock?
2398 bne-- hfmBadLock ; Nope...
2400 mr r3,r28 ; get the pmap address
2401 mr r4,r29 ; Get bits 0:31 to look for
2402 mr r5,r30 ; Get bits 32:64
2404 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
2406 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2407 mr. r31,r3 ; Save the mapping if we found it
2408 cmplwi cr1,r0,0 ; Are we removing?
2409 mr r29,r4 ; Save next va high half
2410 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2411 mr r30,r5 ; Save next va low half
2412 li r6,0 ; Assume we did not find it
2413 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2415 bt-- cr0_eq,hfmNotFnd ; We did not find it...
2417 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2419 andc r4,r31,r26 ; Get back to the mapping page start
2421 ; Note: we can treat 32- and 64-bit the same here. Because we are going from
2422 ; physical to virtual and we only do 32-bit virtual, we only need the low order
2425 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2426 li r6,-1 ; Indicate we found it and it is not being removed
2427 xor r31,r31,r4 ; Flip to virtual
2429 hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2430 bl sxlkUnlock ; Unlock the search list
2432 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2433 and r3,r3,r6 ; Clear if not found or removing
2435 hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
2437 mtmsr r27 ; Restore enables/translation/etc.
2439 b hfmReturnC ; Join common...
2441 hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2444 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2445 stw r30,4(r25) ; Save the bottom of the next va
2446 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2447 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2448 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2449 and r3,r3,r6 ; Clear return if the mapping is being removed
2450 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2451 mtlr r0 ; Restore the return
2452 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2453 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2454 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2455 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2456 lwz r1,0(r1) ; Pop the stack
2461 hfmBadLock: li r3,1 ; Set lock time out error code
2462 b hfmReturn ; Leave....
2464 hfmPanic: lis r0,hi16(Choke) ; System abend
2465 ori r0,r0,lo16(Choke) ; System abend
2466 li r3,failMapping ; Show that we failed some kind of mapping thing
2471 * void hw_clear_maps(void)
2473 * Remove all mappings for all phys entries.
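;
;			Sketch of the scan below (illustrative C).  Note that the code takes
;			no physent locks, so the caller must guarantee nothing else is
;			touching the mapping chains while this runs:
;
;				for (each region r in pmap_mem_regions)
;					for (each physent pp in r's physical table)
;						for (mp = first_mapping(pp); mp != 0; mp = next_alias(mp))
;							mp->mpPte &= ~mpHValid;      /* forget any cached PTE pointer */
;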
2479 .globl EXT(hw_clear_maps)
2482 mflr r10 ; Save the link register
2483 mfcr r9 ; Save the condition register
2484 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2486 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2487 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2490 lwz r3,mrPhysTab(r5) ; Get the actual table address
2491 lwz r0,mrStart(r5) ; Get start of table entry
2492 lwz r4,mrEnd(r5) ; Get end of table entry
2493 addi r5,r5,mrSize ; Point to the next regions
2495 cmplwi r3,0 ; No more regions?
2496 beq-- hcmDone ; Leave...
2498 sub r4,r4,r0 ; Calculate physical entry count
2502 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2506 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2507 addi r3,r3,physEntrySize ; Next phys_entry
2510 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
2511 beq hcmNoMap32 ; Did not find one...
2513 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2514 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2515 stw r0,mpPte(r4) ; Get the quick pointer again
2517 lwz r4,mpAlias+4(r4) ; Chain on to the next
2518 b hcmNextMap32 ; Check it out...
2526 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2527 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2528 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2529 addi r3,r3,physEntrySize ; Next phys_entry
2532 andc. r4,r4,r0 ; Clean and test mapping address
2533 beq hcmNoMap64 ; Did not find one...
2535 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2536 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2537 stw r0,mpPte(r4) ; Get the quick pointer again
2539 ld r4,mpAlias(r4) ; Chain on to the next
2540 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2541 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2542 b hcmNextMap64 ; Check it out...
2550 mtlr r10 ; Restore the return
2551 mtcr r9 ; Restore the condition register
2552 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2554 mtmsr r11 ; Restore translation/mode/etc.
2559 mtmsrd r11 ; Restore translation/mode/etc.
2566 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
2567 * walks all mapping for a physical page and performs
2568 * specified operations on each.
2570 * pp is unlocked physent
2571 * preop is operation to perform on physent before walk. This would be
2572 * used to set cache attribute or protection
2573 * op is the operation to perform on each mapping during walk
2574 * postop is the operation to perform on the physent after the walk. This would be
2575 * used to set or reset the RC bits.
2576 * opmod modifies the action taken on any connected PTEs visited during the walk.
2579 * We return the RC bits from before postop is run.
2581 * Note that this is designed to be called from 32-bit mode with a stack.
2583 * We disable translation and all interruptions here. This keeps us
2584 * from having to worry about a deadlock due to having anything locked
2585 * and needing it to process a fault.
2587 * We lock the physent, execute preop, and then walk each mapping in turn.
2588 * If there is a PTE, it is invalidated and the RC merged into the physent.
2589 * Then we call the op function.
2590 * Then we revalidate the PTE.
2591 * Once all mappings are finished, we save the physent RC and call the
2592 * postop routine. Then we unlock the physent and return the RC.
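;
;			The overall shape, in C (illustrative; the real code dispatches
;			through CTR to routines laid out in fixed-size slots, see below):
;
;				lock_physent(pp);
;				rc = pp->ppLink & (ppR | ppC);           /* RC before postop runs            */
;				if (preop(pp)) {                         /* preop may abort the walk         */
;					for (mp = first_mapping(pp); mp != 0; mp = next_alias(mp)) {
;						pte = classify_and_handle_pte(mp, opmod);  /* invalidate and/or merge */
;						ok  = op(mp);                    /* per-mapping operation            */
;						if (pte) revalidate(pte);
;						if (!ok) goto early;             /* op may stop the walk early       */
;					}
;					rc = pp->ppLink & (ppR | ppC);       /* refresh RC, then run postop      */
;					postop(pp);                          /* early exit skips postop          */
;				}
;			early:	unlock_physent(pp);  return rc;
;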
2598 .globl EXT(hw_walk_phys)
2601 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2602 mflr r0 ; Save the link register
2603 stw r24,FM_ARG0+0x00(r1) ; Save a register
2604 stw r25,FM_ARG0+0x04(r1) ; Save a register
2605 stw r26,FM_ARG0+0x08(r1) ; Save a register
2606 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2607 mr r24,r8 ; Save the parm
2608 mr r25,r7 ; Save the parm
2609 stw r28,FM_ARG0+0x10(r1) ; Save a register
2610 stw r29,FM_ARG0+0x14(r1) ; Save a register
2611 stw r30,FM_ARG0+0x18(r1) ; Save a register
2612 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2613 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2615 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2617 mfsprg r26,0 ; (INSTRUMENTATION)
2618 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2619 addi r27,r27,1 ; (INSTRUMENTATION)
2620 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2621 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2622 slwi r12,r24,2 ; (INSTRUMENTATION)
2623 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2624 addi r27,r27,1 ; (INSTRUMENTATION)
2625 stwx r27,r26,r12 ; (INSTRUMENTATION)
2627 mr r26,r11 ; Save the old MSR
2628 lis r27,hi16(hwpOpBase) ; Get high order of op base
2629 slwi r4,r4,7 ; Convert preop to displacement
2630 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2631 slwi r5,r5,7 ; Convert op to displacement
2632 add r12,r4,r27 ; Point to the preop routine
2633 slwi r28,r6,7 ; Convert postop to displacement
2634 mtctr r12 ; Set preop routine
2635 add r28,r28,r27 ; Get the address of the postop routine
2636 add r27,r5,r27 ; Get the address of the op routine
2638 bl mapPhysLock ; Lock the physent
2640 mr r29,r3 ; Save the physent address
2642 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2644 bctrl ; Call preop routine
2645 bne- hwpEarly32 ; preop says to bail now...
2647 cmplwi r24,hwpMergePTE ; Classify operation modifier
2648 mtctr r27 ; Set up the op function address
2649 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2650 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2651 beq hwpMSrc32 ; Do TLB merge for each mapping
2653 hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2654 beq hwpNone32 ; Did not find one...
2656 bctrl ; Call the op function
2658 bne- hwpEarly32 ; op says to bail now...
2659 lwz r31,mpAlias+4(r31) ; Chain on to the next
2660 b hwpQSrc32 ; Check it out...
2663 hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2664 beq hwpNone32 ; Did not find one...
2666 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2667 bctrl ; Call the op function
2669 bne- hwpEarly32 ; op says to bail now...
2670 lwz r31,mpAlias+4(r31) ; Chain on to the next
2671 b hwpMSrc32 ; Check it out...
2674 hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2675 beq hwpNone32 ; Did not find one...
2678 ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2679 ; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2680 ; If there is no PTE, PTE low is obtained from mapping
2682 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2684 bctrl ; Call the op function
2686 crmove cr1_eq,cr0_eq ; Save the return code
2688 mr. r3,r3 ; Was there a previously valid PTE?
2689 beq- hwpNxt32 ; Nope...
2691 stw r5,4(r3) ; Store second half of PTE
2692 eieio ; Make sure we do not reorder
2693 stw r4,0(r3) ; Revalidate the PTE
2695 eieio ; Make sure all updates come first
2696 stw r6,0(r7) ; Unlock the PCA
2698 hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2699 lwz r31,mpAlias+4(r31) ; Chain on to the next
2700 b hwpSrc32 ; Check it out...
2704 hwpNone32: mtctr r28 ; Get the post routine address
2706 lwz r30,ppLink+4(r29) ; Save the old RC
2707 mr r3,r29 ; Get the physent address
2708 bctrl ; Call post routine
2710 bl mapPhysUnlock ; Unlock the physent
2712 mtmsr r26 ; Restore translation/mode/etc.
2715 b hwpReturn ; Go restore registers and return...
2719 hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2720 mr r3,r29 ; Get the physent address
2721 bl mapPhysUnlock ; Unlock the physent
2723 mtmsr r26 ; Restore translation/mode/etc.
2726 b hwpReturn ; Go restore registers and return...
2730 hwp64: bctrl ; Call preop routine
2731 bne-- hwpEarly64 ; preop says to bail now...
2733 cmplwi r24,hwpMergePTE ; Classify operation modifier
2734 mtctr r27 ; Set up the op function address
2737 ld r31,ppLink(r3) ; Get the pointer to the first mapping
2738 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2739 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2740 beq hwpMSrc64 ; Do TLB merge for each mapping
2742 hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2743 beq hwpNone64 ; Did not find one...
2745 bctrl ; Call the op function
2747 bne-- hwpEarly64 ; op says to bail now...
2748 ld r31,mpAlias(r31) ; Chain on to the next
2749 b hwpQSrc64 ; Check it out...
2752 hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2753 beq hwpNone64 ; Did not find one...
2755 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2756 bctrl ; Call the op function
2758 bne-- hwpEarly64 ; op says to bail now...
2759 ld r31,mpAlias(r31) ; Chain on to the next
2760 b hwpMSrc64 ; Check it out...
2763 hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2764 beq hwpNone64 ; Did not find one...
2766 ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2767 ; PTE low in R5. PTEG comes back locked if there is one
2769 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2771 bctrl ; Call the op function
2773 crmove cr1_eq,cr0_eq ; Save the return code
2775 mr. r3,r3 ; Was there a previously valid PTE?
2776 beq-- hwpNxt64 ; Nope...
2778 std r5,8(r3) ; Save bottom of PTE
2779 eieio ; Make sure we do not reorder
2780 std r4,0(r3) ; Revalidate the PTE
2782 eieio ; Make sure all updates come first
2783 stw r6,0(r7) ; Unlock the PCA
2785 hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2786 ld r31,mpAlias(r31) ; Chain on to the next
2787 b hwpSrc64 ; Check it out...
2791 hwpNone64: mtctr r28 ; Get the post routine address
2793 lwz r30,ppLink+4(r29) ; Save the old RC
2794 mr r3,r29 ; Get the physent address
2795 bctrl ; Call post routine
2797 bl mapPhysUnlock ; Unlock the physent
2799 mtmsrd r26 ; Restore translation/mode/etc.
2801 b hwpReturn ; Go restore registers and return...
2805 hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2806 mr r3,r29 ; Get the physent address
2807 bl mapPhysUnlock ; Unlock the physent
2809 mtmsrd r26 ; Restore translation/mode/etc.
2812 hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2813 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2814 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2815 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2816 mr r3,r30 ; Pass back the RC
2817 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2818 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2819 mtlr r0 ; Restore the return
2820 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2821 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2822 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2823 lwz r1,0(r1) ; Pop the stack
2828 ; The preop/op/postop function table.
2829 ; Each function gets a 128-byte slot and must be no more than
2830 ; 32 instructions. If more than 32, we must fix the address calculations
2831 ; at the start of hwpOpBase
2833 ; The routine must set CR0_EQ in order to continue scan.
2834 ; If CR0_EQ is not set, an early return from the function is made.
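;
;			Conceptually this is a jump table with fixed-size slots: routine i
;			starts at hwpOpBase + i*128, so the caller turns an operation number
;			into an address with a shift and an add (illustrative C):
;
;				addr = hwpOpBase + (op_index << 7);      /* 128-byte slots           */
;				keep_going = ((int (*)(void))addr)();    /* via mtctr/bctrl in asm   */
;
;			Each routine reports "keep scanning" by returning with CR0_EQ set.
;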
2841 ; Function 0 - No operation
2843 hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2844 blr ; Just return...
2848 ; This is the continuation of function 4 - Set attributes in mapping
2850 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2851 ; NOTE: Do we have to deal with i-cache here?
2853 hwpSAM: li r11,4096 ; Get page size
2855 hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2856 dcbf r11,r5 ; Flush the line in the data cache
2857 bgt++ hwpSAMinvd ; Go do the rest of it...
2859 sync ; Make sure it is done
2861 li r11,4096 ; Get page size
2863 hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2864 icbi r11,r5 ; Flush the line in the icache
2865 bgt++ hwpSAMinvi ; Go do the rest of it...
2867 sync ; Make sure it is done
2869 cmpw r0,r0 ; Make sure we return CR0_EQ
2873 ; Function 1 - Set protection in physent (obsolete)
2875 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2877 hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
2881 ; Function 2 - Set protection in mapping
2883 ; NOTE: Changes to no-execute permission are ignored
2885 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
2887 hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2888 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2889 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2890 li r0,lo16(mpPP) ; Get protection bits
2891 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2892 rlwinm r2,r25,0,mpPP ; Isolate new protection bits
2893 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2894 andc r5,r5,r0 ; Clear the old prot bits
2895 or r5,r5,r2 ; Move in the new prot bits
2896 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2897 cmpw r0,r0 ; Make sure we return CR0_EQ
2898 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2901 ; Function 3 - Set attributes in physent
2903 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
2905 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2907 hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2908 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
2909 stwcx. r4,r5,r29 ; Try to stuff it
2910 bne-- hwpSAtrPhX ; Try again...
2911 ; Note: CR0_EQ is set because of stwcx.
2914 ; Function 4 - Set attributes in mapping
2916 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2918 hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2919 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2920 li r2,mpM ; Force on coherent
2921 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2922 li r0,lo16(mpWIMG) ; Get wimg mask
2923 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2924 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2925 ; Copy in the cache inhibited bit
2926 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2927 andc r5,r5,r0 ; Clear the old wimg
2928 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2929 ; Copy in the guarded bit
2930 mfsprg r9,2 ; Feature flags
2931 or r5,r5,r2 ; Move in the new wimg
2932 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2933 lwz r2,mpPAddr(r31) ; Get the physical address
2934 li r0,0xFFF ; Start a mask
2935 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2936 rlwinm r5,r0,0,1,0 ; Copy to top half
2937 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2938 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2939 and r5,r5,r2 ; Clean stuff in top 32 bits
2940 andc r2,r2,r0 ; Clean bottom too
2941 rlwimi r5,r2,0,0,31 ; Insert low 32 bits to make full physical address
2942 b hwpSAM ; Join common
2944 ; NOTE: we moved the remainder of the code out of here because it
2945 ; did not fit in the 128 bytes allotted. It got stuck into the free space
2946 ; at the end of the no-op function.
2951 ; Function 5 - Clear reference in physent
2953 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
2955 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2957 hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2958 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2959 stwcx. r4,r5,r29 ; Try to stuff it
2960 bne-- hwpCRefPhX ; Try again...
2961 ; Note: CR0_EQ is set because of stwcx.
2965 ; Function 6 - Clear reference in mapping
2967 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
2969 hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2970 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2971 andc r5,r5,r0 ; Clear in PTE copy
2972 andc r8,r8,r0 ; and in the mapping
2973 cmpw r0,r0 ; Make sure we return CR0_EQ
2974 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2978 ; Function 7 - Clear change in physent
2980 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
2982 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2984 hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2985 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2986 stwcx. r4,r5,r29 ; Try to stuff it
2987 bne-- hwpCCngPhX ; Try again...
2988 ; Note: CR0_EQ is set because of stwcx.
2992 ; Function 8 - Clear change in mapping
2994 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2996 hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2997 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2998 andc r5,r5,r0 ; Clear in PTE copy
2999 andc r8,r8,r0 ; and in the mapping
3000 cmpw r0,r0 ; Make sure we return CR0_EQ
3001 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3005 ; Function 9 - Set reference in physent
3007 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
3009 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3011 hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
3012 ori r4,r4,lo16(ppR) ; Set the reference
3013 stwcx. r4,r5,r29 ; Try to stuff it
3014 bne-- hwpSRefPhX ; Try again...
3015 ; Note: CR0_EQ is set because of stwcx.
3019 ; Function 10 - Set reference in mapping
3021 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3023 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3024 ori r8,r8,lo16(mpR) ; Set reference in mapping
3025 cmpw r0,r0 ; Make sure we return CR0_EQ
3026 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3029 ; Function 11 - Set change in physent
3031 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
3033 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3035 hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3036 ori r4,r4,lo16(ppC) ; Set the change bit
3037 stwcx. r4,r5,r29 ; Try to stuff it
3038 bne-- hwpSCngPhX ; Try again...
3039 ; Note: CR0_EQ is set because of stwcx.
3042 ; Function 12 - Set change in mapping
3044 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
3046 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3047 ori r8,r8,lo16(mpC) ; Set change in mapping
3048 cmpw r0,r0 ; Make sure we return CR0_EQ
3049 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3052 ; Function 13 - Test reference in physent
3054 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3056 hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3057 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3058 blr ; Return (CR0_EQ set to continue if reference is off)...
3061 ; Function 14 - Test reference in mapping
3063 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
3065 hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3066 blr ; Return (CR0_EQ set to continue if reference is off)...
3069 ; Function 15 - Test change in physent
3071 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
3073 hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3074 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
3075 blr ; Return (CR0_EQ set to continue if change is off)...
3078 ; Function 16 - Test change in mapping
3080 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
3082 hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
3083 blr ; Return (CR0_EQ set to continue if change is off)...
3086 ; Function 17 - Test reference and change in physent
3088 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3091 lwz r0,ppLink+4(r29) ; Get the flags from physent
3092 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3093 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3094 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3095 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3098 ; Function 18 - Test reference and change in mapping
3100 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3102 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3103 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3104 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3105 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3108 ; Function 19 - Clear reference and change in physent
3110 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3112 li r5,ppLink+4 ; Get offset for flag part of physent
3115 lwarx r4,r5,r29 ; Get the old flags
3116 andc r4,r4,r25 ; Clear R and C as specified by mask
3117 stwcx. r4,r5,r29 ; Try to stuff it
3118 bne-- hwpCRefCngPhX ; Try again...
3119 ; Note: CR0_EQ is set because of stwcx.
3123 ; Function 20 - Clear reference and change in mapping
3125 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3127 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3128 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3129 andc r5,r5,r0 ; Clear in PTE copy
3130 andc r8,r8,r0 ; and in the mapping
3131 cmpw r0,r0 ; Make sure we return CR0_EQ
3132 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3136 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
3139 ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
3142 ; mapRtOK - if all is ok
3143 ; mapRtBadLk - if mapping lock fails
3144 ; mapRtPerm - if mapping is permanent
3145 ; mapRtNotFnd - if mapping is not found
3146 ; mapRtBlock - if mapping is a block
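;
;			Sketch of the flow (illustrative C; helper names are not real kernel
;			functions, and PP/N are the protection and no-execute fields of the
;			mapping/PTE handled in the code below):
;
;				if (shared_lock(pmap) != 0)     return mapRtBadLk;
;				mp = mapSearch(pmap, va, &next);
;				if (!mp || removing(mp))        rc = mapRtNotFnd;
;				else if (is_block(mp))          rc = mapRtBlock;
;				else if (is_permanent(mp))      rc = mapRtPerm;
;				else {
;					pte = invalidate_pte(mp);    /* also merges RC into the physent  */
;					set_pp_bits(mp, prot);       /* update mapping and the PTE image */
;					if (pte) revalidate(pte);
;					rc = mapRtOK;
;				}
;				unlock(pmap);  *nextva = next;  return rc;
;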
3149 .globl EXT(hw_protect)
3152 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3153 mflr r0 ; Save the link register
3154 stw r24,FM_ARG0+0x00(r1) ; Save a register
3155 stw r25,FM_ARG0+0x04(r1) ; Save a register
3156 mr r25,r7 ; Remember address of next va
3157 stw r26,FM_ARG0+0x08(r1) ; Save a register
3158 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3159 stw r28,FM_ARG0+0x10(r1) ; Save a register
3160 mr r24,r6 ; Save the new protection flags
3161 stw r29,FM_ARG0+0x14(r1) ; Save a register
3162 stw r30,FM_ARG0+0x18(r1) ; Save a register
3163 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3164 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3167 lwz r11,pmapFlags(r3) ; Get pmaps flags
3168 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3169 bne hpPanic ; Call not valid for guest shadow assist pmap
3172 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3173 lwz r7,pmapvr+4(r3) ; Get the second part
3176 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3178 mr r27,r11 ; Remember the old MSR
3179 mr r26,r12 ; Remember the feature bits
3181 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3183 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3185 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3187 hpSF1: mr r29,r4 ; Save top half of vaddr
3188 mr r30,r5 ; Save the bottom half
3190 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3191 bl sxlkShared ; Go get a shared lock on the mapping lists
3192 mr. r3,r3 ; Did we get the lock?
3193 bne-- hpBadLock ; Nope...
3195 mr r3,r28 ; get the pmap address
3196 mr r4,r29 ; Get bits 0:31 to look for
3197 mr r5,r30 ; Get bits 32:64
3199 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
3201 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3202 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3203 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3204 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3205 mr. r31,r3 ; Save the mapping if we found it
3206 mr r29,r4 ; Save next va high half
3207 mr r30,r5 ; Save next va low half
3209 beq-- hpNotFound ; Not found...
3211 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
3213 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3215 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3217 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
3218 mr. r3,r3 ; Was there a previously valid PTE?
3220 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3222 beq-- hpNoOld32 ; Nope...
3224 stw r5,4(r3) ; Store second half of PTE
3225 eieio ; Make sure we do not reorder
3226 stw r4,0(r3) ; Revalidate the PTE
3228 eieio ; Make sure all updates come first
3229 stw r6,0(r7) ; Unlock PCA
3231 hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3232 bl sxlkUnlock ; Unlock the search list
3234 li r3,mapRtOK ; Set normal return
3235 b hpR32 ; Join common...
3240 hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3242 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
3243 mr. r3,r3 ; Was there a previously valid PTE?
3245 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3247 beq-- hpNoOld64 ; Nope...
3249 std r5,8(r3) ; Store second half of PTE
3250 eieio ; Make sure we do not reorder
3251 std r4,0(r3) ; Revalidate the PTE
3253 eieio ; Make sure all updates come first
3254 stw r6,0(r7) ; Unlock PCA
3256 hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3257 bl sxlkUnlock ; Unlock the search list
3259 li r3,mapRtOK ; Set normal return
3260 b hpR64 ; Join common...
3264 hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3266 hpR32: mtmsr r27 ; Restore enables/translation/etc.
3268 b hpReturnC ; Join common...
3270 hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3273 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3274 stw r30,4(r25) ; Save the bottom of the next va
3275 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the saved return
3276 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3277 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3278 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3279 mtlr r0 ; Restore the return
3280 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3281 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3282 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3283 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3284 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3285 lwz r1,0(r1) ; Pop the stack
3290 hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3291 b hpReturn ; Leave....
3293 hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3294 bl sxlkUnlock ; Unlock the search list
3296 li r3,mapRtNotFnd ; Set that we did not find the requested page
3297 b hpReturn ; Leave....
3300 hpNotAllowed: rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3301 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3302 bne-- hpNotFound ; Yeah...
3303 bl sxlkUnlock ; Unlock the search list
3305 li r3,mapRtBlock ; Assume it was a block
3306 rlwinm r0,r7,0,mpType ; Isolate mapping type
3307 cmplwi r0,mpBlock ; Is this a block mapping?
3308 beq++ hpReturn ; Yes, leave...
3310 li r3,mapRtPerm ; Set that we hit a permanent page
3311 b hpReturn ; Leave....
3313 hpPanic: lis r0,hi16(Choke) ; System abend
3314 ori r0,r0,lo16(Choke) ; System abend
3315 li r3,failMapping ; Show that we failed some kind of mapping thing
3320 ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3322 ; Returns following code ORed with RC from mapping
3323 ; mapRtOK - if all is ok
3324 ; mapRtBadLk - if mapping lock fails
3325 ; mapRtNotFnd - if mapping is not found
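;
; Illustrative decomposition of the result (a sketch, not the declared interface;
; mpR/mpC are the reference/change bit masks defined elsewhere):
;
;     unsigned int ret  = hw_test_rc(pmap, va, reset);
;     unsigned int code = ret & ~(mpR | mpC);    /* mapRtOK, mapRtBadLk, mapRtNotFnd */
;     int referenced    = (ret & mpR) != 0;      /* page has been referenced         */
;     int changed       = (ret & mpC) != 0;      /* page has been modified           */
;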
3328 .globl EXT(hw_test_rc)
3331 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3332 mflr r0 ; Save the link register
3333 stw r24,FM_ARG0+0x00(r1) ; Save a register
3334 stw r25,FM_ARG0+0x04(r1) ; Save a register
3335 stw r26,FM_ARG0+0x08(r1) ; Save a register
3336 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3337 stw r28,FM_ARG0+0x10(r1) ; Save a register
3338 mr r24,r6 ; Save the reset request
3339 stw r29,FM_ARG0+0x14(r1) ; Save a register
3340 stw r30,FM_ARG0+0x18(r1) ; Save a register
3341 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3342 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3345 lwz r11,pmapFlags(r3) ; Get pmaps flags
3346 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3347 bne htrPanic ; Call not valid for guest shadow assist pmap
3350 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3351 lwz r7,pmapvr+4(r3) ; Get the second part
3354 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3356 mr r27,r11 ; Remember the old MSR
3357 mr r26,r12 ; Remember the feature bits
3359 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3361 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
3363 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3365 htrSF1: mr r29,r4 ; Save top half of vaddr
3366 mr r30,r5 ; Save the bottom half
3368 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3369 bl sxlkShared ; Go get a shared lock on the mapping lists
3370 mr. r3,r3 ; Did we get the lock?
3372 bne-- htrBadLock ; Nope...
3374 mr r3,r28 ; get the pmap address
3375 mr r4,r29 ; Get bits 0:31 to look for
3376 mr r5,r30 ; Get bits 32:64
3378 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
3380 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3381 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3382 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3383 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3384 mr. r31,r3 ; Save the mapping if we found it
3385 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
3387 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
3389 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3391 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3393 cmplwi cr1,r24,0 ; Do we want to clear RC?
3394 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3395 mr. r3,r3 ; Was there a previously valid PTE?
3396 li r0,lo16(mpR|mpC) ; Get bits to clear
3398 and r25,r5,r0 ; Save the RC bits
3399 beq++ cr1,htrNoClr32 ; Nope...
3401 andc r12,r12,r0 ; Clear mapping copy of RC
3402 andc r5,r5,r0 ; Clear PTE copy of RC
3403 sth r12,mpVAddr+6(r31) ; Set the new RC
3405 htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
3407 sth r5,6(r3) ; Store updated RC
3408 eieio ; Make sure we do not reorder
3409 stw r4,0(r3) ; Revalidate the PTE
3411 eieio ; Make sure all updates come first
3412 stw r6,0(r7) ; Unlock PCA
3414 htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3415 bl sxlkUnlock ; Unlock the search list
3416 li r3,mapRtOK ; Set normal return
3417 b htrR32 ; Join common...
3422 htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3424 cmplwi cr1,r24,0 ; Do we want to clear RC?
3425 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3426 mr. r3,r3 ; Was there a previously valid PTE?
3427 li r0,lo16(mpR|mpC) ; Get bits to clear
3429 and r25,r5,r0 ; Save the RC bits
3430 beq++ cr1,htrNoClr64 ; Nope...
3432 andc r12,r12,r0 ; Clear mapping copy of RC
3433 andc r5,r5,r0 ; Clear PTE copy of RC
3434 sth r12,mpVAddr+6(r31) ; Set the new RC
3436 htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3438 sth r5,14(r3) ; Store updated RC
3439 eieio ; Make sure we do not reorder
3440 std r4,0(r3) ; Revalidate the PTE
3442 eieio ; Make sure all updates come first
3443 stw r6,0(r7) ; Unlock PCA
3445 htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3446 bl sxlkUnlock ; Unlock the search list
3447 li r3,mapRtOK ; Set normal return
3448 b htrR64 ; Join common...
3452 htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
3454 htrR32: mtmsr r27 ; Restore enables/translation/etc.
3456 b htrReturnC ; Join common...
3458 htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3461 htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the saved return
3462 or r3,r3,r25 ; Send the RC bits back
3463 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3464 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3465 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3466 mtlr r0 ; Restore the return
3467 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3468 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3469 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3470 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3471 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3472 lwz r1,0(r1) ; Pop the stack
3477 htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3478 b htrReturn ; Leave....
3481 htrNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3482 bl sxlkUnlock ; Unlock the search list
3484 li r3,mapRtNotFnd ; Set that we did not find the requested page
3485 b htrReturn ; Leave....
3487 htrPanic: lis r0,hi16(Choke) ; System abend
3488 ori r0,r0,lo16(Choke) ; System abend
3489 li r3,failMapping ; Show that we failed some kind of mapping thing
3495 ; mapFindLockPN - find and lock physent for a given page number
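;
; Conceptual C sketch of the search below (illustrative only; the field names
; follow the mrStart/mrEnd/mrPhysTab usage in the assembly, and the struct name
; is a stand-in):
;
;     /* walk pmap_mem_regions until the bank holding ppnum is found, then
;        index that bank's physent array by (ppnum - bank start) */
;     for (struct mem_region *mr = pmap_mem_regions; mr->mrPhysTab != NULL; mr++) {
;         if (ppnum >= mr->mrStart && ppnum <= mr->mrEnd)
;             return &mr->mrPhysTab[ppnum - mr->mrStart];    /* then lock it */
;     }
;     return NULL;    /* not a managed page */
;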
3500 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3501 mr r2,r3 ; Save our target
3502 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3504 mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3505 lwz r5,mrStart(r9) ; Get start of table entry
3506 lwz r0,mrEnd(r9) ; Get end of table entry
3507 addi r9,r9,mrSize ; Point to the next slot
3508 cmplwi cr7,r3,0 ; Are we at the end of the table?
3509 cmplw r2,r5 ; See if we are in this table
3510 cmplw cr1,r2,r0 ; Check end also
3511 sub r4,r2,r5 ; Calculate index to physical entry
3512 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
3513 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3514 slwi r4,r4,3 ; Get offset to physical entry
3516 blt-- mapFLPNitr ; Did not find it...
3518 add r3,r3,r4 ; Point right to the slot
3519 b mapPhysLock ; Join common lock code
3522 mapFLPNmiss: li r3,0 ; Show that we did not find it
3527 ; mapPhysFindLock - find physent list and lock it
3528 ; R31 points to mapping
3533 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3534 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3535 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
3536 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3537 add r3,r3,r4 ; Point to table entry
3538 lwz r5,mpPAddr(r31) ; Get physical page number
3539 lwz r7,mrStart(r3) ; Get the start of range
3540 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3541 sub r6,r5,r7 ; Get index to physent
3542 rlwinm r6,r6,3,0,28 ; Get offset to physent
3543 add r3,r3,r6 ; Point right to the physent
3544 b mapPhysLock ; Join in the lock...
3547 ; mapPhysLock - lock a physent list
3548 ; R3 contains list header
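;
; Equivalent C sketch of the lock protocol (illustrative only; bit 0 of ppLink
; is the lock bit, and compare_and_swap stands in for the lwarx/stwcx. pair):
;
;     #define PP_LOCK 0x80000000u
;     void map_phys_lock(volatile uint32_t *ppLink) {
;         for (;;) {
;             while (*ppLink & PP_LOCK)        /* spin without holding a reservation */
;                 ;
;             uint32_t old = *ppLink;          /* lwarx                              */
;             if (!(old & PP_LOCK) &&
;                 compare_and_swap(ppLink, old, old | PP_LOCK))    /* stwcx.         */
;                 break;                       /* got it; isync orders what follows  */
;         }
;     }
;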
3553 li r2,lgKillResv ; Get a spot to kill reservation
3554 stwcx. r2,0,r2 ; Kill it...
3557 lwz r2,ppLink(r3) ; Get physent chain header
3558 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3559 bne-- mapPhysLockT ; Nope, still locked...
3562 lwarx r2,0,r3 ; Get the lock
3563 rlwinm. r0,r2,0,0,0 ; Is it locked?
3564 oris r0,r2,0x8000 ; Set the lock bit
3565 bne-- mapPhysLockS ; It is locked, spin on it...
3566 stwcx. r0,0,r3 ; Try to stuff it back...
3567 bne-- mapPhysLock ; Collision, try again...
3568 isync ; Clear any speculations
3573 ; mapPhysUnlock - unlock a physent list
3574 ; R3 contains list header
3579 lwz r0,ppLink(r3) ; Get physent chain header
3580 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3581 eieio ; Make sure unlock comes last
3582 stw r0,ppLink(r3) ; Unlock the list
3586 ; mapPhysMerge - merge the RC bits into the master copy
3587 ; R3 points to the physent
3588 ; R4 contains the RC bits
3590 ; Note: we just return if RC is 0
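;
; Illustrative C equivalent (a sketch; compare_and_swap stands in for the
; lwarx/stwcx. retry loop that performs the atomic OR):
;
;     void map_phys_merge(volatile uint32_t *rcword, uint32_t rc) {
;         if (rc == 0) return;                 /* nothing to merge */
;         uint32_t old;
;         do {
;             old = *rcword;                   /* lwarx            */
;         } while (!compare_and_swap(rcword, old, old | rc));    /* stwcx. */
;     }
;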
3595 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3596 la r5,ppLink+4(r3) ; Point to the RC field
3597 beqlr-- ; Leave if RC is 0...
3600 lwarx r6,0,r5 ; Get the RC part
3601 or r6,r6,r4 ; Merge in the RC
3602 stwcx. r6,0,r5 ; Try to stuff it back...
3603 bne-- mapPhysMergeT ; Collision, try again...
3607 ; Sets the physent link pointer and preserves all flags
3608 ; The list is locked
3609 ; R3 points to physent
3610 ; R4 has link to set
3616 la r5,ppLink+4(r3) ; Point to the link word
3619 lwarx r2,0,r5 ; Get the link and flags
3620 rlwimi r4,r2,0,ppFlags ; Insert the flags
3621 stwcx. r4,0,r5 ; Stick them back
3622 bne-- mapPhyCSetR ; Someone else did something, try again...
3628 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3629 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
3632 ldarx r2,0,r3 ; Get the link and flags
3633 and r5,r2,r0 ; Isolate the flags
3634 or r6,r4,r5 ; Add them to the link
3635 stdcx. r6,0,r3 ; Stick them back
3636 bne-- mapPhyCSet64x ; Someone else did something, try again...
3640 ; mapBumpBusy - increment the busy count on a mapping
3641 ; R3 points to mapping
3647 lwarx r4,0,r3 ; Get mpBusy
3648 addis r4,r4,0x0100 ; Bump the busy count
3649 stwcx. r4,0,r3 ; Save it back
3650 bne-- mapBumpBusy ; This did not work, try again...
3654 ; mapDropBusy - decrement the busy count on a mapping
3655 ; R3 points to mapping
3658 .globl EXT(mapping_drop_busy)
3661 LEXT(mapping_drop_busy)
3663 lwarx r4,0,r3 ; Get mpBusy
3664 addis r4,r4,0xFF00 ; Drop the busy count
3665 stwcx. r4,0,r3 ; Save it back
3666 bne-- mapDropBusy ; This did not work, try again...
3670 ; mapDrainBusy - drain the busy count on a mapping
3671 ; R3 points to mapping
3672 ; Note: we already have a busy for ourselves. Only one
3673 ; busy per processor is allowed, so we just spin here
3674 ; waiting for the count to drop to 1.
3675 ; Also, the mapping can not be on any lists when we do this
3676 ; so all we are doing is waiting until it can be released.
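;
; Conceptual sketch of the busy count handling (illustrative only). The count
; lives in the high-order byte of the mpFlags word, so mapBumpBusy/mapDropBusy
; above atomically add +1/-1 to that byte, and mapDrainBusy just waits until
; only our own busy remains:
;
;     #define MP_BUSY_SHIFT 24                            /* high byte of mpFlags */
;     unsigned int busy_count(uint32_t mpFlags) {
;         return (mpFlags >> MP_BUSY_SHIFT) & 0xFF;
;     }
;     void map_drain_busy(volatile uint32_t *mpFlags) {
;         while (busy_count(*mpFlags) != 1)               /* 1 == only our busy   */
;             ;
;     }
;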
3682 lwz r4,mpFlags(r3) ; Get mpBusy
3683 rlwinm r4,r4,8,24,31 ; Clean it up
3684 cmplwi r4,1 ; Is it just our busy?
3685 beqlr++ ; Yeah, it is clear...
3686 b mapDrainBusy ; Try again...
3691 ; handleDSeg - handle a data segment fault
3692 ; handleISeg - handle an instruction segment fault
3694 ; All that we do here is to map these to DSI or ISI and ensure
3695 ; that the hash bit is not set. This forces the fault code
3696 ; to also handle the missing segment.
3698 ; At entry R2 contains per_proc, R13 contains savearea pointer,
3699 ; and R11 is the exception code.
3703 .globl EXT(handleDSeg)
3707 li r11,T_DATA_ACCESS ; Change fault to DSI
3708 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3709 b EXT(handlePF) ; Join common...
3712 .globl EXT(handleISeg)
3716 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3717 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3718 b EXT(handlePF) ; Join common...
3722 * handlePF - handle a page fault interruption
3724 * At entry R2 contains per_proc, R13 contains savearea pointer,
3725 * and R11 is the exception code.
3727 * This first part does a quick check to see if we can handle the fault.
3728 * We cannot handle any kind of protection exceptions here, so we pass
3729 * them up to the next level.
3731 * NOTE: In order for a page-fault redrive to work, the translation miss
3732 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3733 * before we come here.
3737 .globl EXT(handlePF)
3741 mfsprg r12,2 ; Get feature flags
3742 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3743 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3744 mtcrf 0x02,r12 ; move pf64Bit to cr6
3745 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3746 lwz r18,SAVflags(r13) ; Get the flags
3748 beq-- gotIfetch ; We have an IFETCH here...
3750 lwz r27,savedsisr(r13) ; Get the DSISR
3751 lwz r29,savedar(r13) ; Get the first half of the DAR
3752 lwz r30,savedar+4(r13) ; And second half
3754 b ckIfProt ; Go check if this is a protection fault...
3756 gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3757 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3758 lwz r30,savesrr0+4(r13) ; And second half
3759 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3761 ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3762 li r20,64 ; Set a limit of 64 nests for sanity check
3763 bne-- hpfExit ; Yes... (probably not though)
3766 ; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3767 ; should be loading the user pmap here.
3770 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3771 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3772 mr r19,r2 ; Remember the per_proc
3773 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3774 mr r23,r30 ; Save the low part of faulting address
3775 beq-- hpfInKern ; Skip if we are in the kernel
3776 la r8,ppUserPmap(r19) ; Point to the current user pmap
3778 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3780 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3783 ; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3784 ; predefined value that corresponds to no address space. When we see that value
3785 ; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3786 ; cause the proper SR to be loaded.
3789 lwz r28,4(r8) ; Pick up the pmap
3790 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3791 mr r25,r28 ; Save the original pmap (in case we nest)
3792 lwz r0,pmapFlags(r28) ; Get pmap's flags
3793 bne hpfGVtest ; Segs are not ours if so...
3794 mfsrin r4,r30 ; Get the SR that was used for translation
3795 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3796 bne++ hpfGVtest ; No...
3798 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3799 b hpfGVtest ; Join on up...
3803 nop ; Push hpfNest to a 32-byte boundary
3804 nop ; Push hpfNest to a 32-byte boundary
3805 nop ; Push hpfNest to a 32-byte boundary
3807 hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3808 mr r25,r28 ; Save the original pmap (in case we nest)
3809 lwz r0,pmapFlags(r28) ; Get pmap's flags
3811 hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3812 bne hpfGVxlate ; Yup, do accelerated shadow stuff
3815 ; This is where we loop descending nested pmaps
3818 hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3819 addi r20,r20,-1 ; Count nest try
3820 bl sxlkShared ; Go get a shared lock on the mapping lists
3821 mr. r3,r3 ; Did we get the lock?
3822 bne-- hpfBadLock ; Nope...
3824 mr r3,r28 ; Get the pmap pointer
3825 mr r4,r22 ; Get top of faulting vaddr
3826 mr r5,r23 ; Get bottom of faulting vaddr
3827 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3829 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3830 mr. r31,r3 ; Save the mapping if we found it
3831 cmplwi cr1,r0,0 ; Check for removal
3832 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3834 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3836 rlwinm r0,r7,0,mpType ; Isolate mapping type
3837 cmplwi r0,mpNest ; Are we again nested?
3838 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3839 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
3840 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3842 lhz r21,mpSpace(r31) ; Get the space
3844 bne++ hpfFoundIt ; No, we found our guy...
3847 #if pmapTransSize != 12
3848 #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3850 cmplwi r0,mpLinkage ; Linkage mapping?
3851 cmplwi cr1,r20,0 ; Too many nestings?
3852 beq-- hpfSpclNest ; Do we need to do special handling?
3854 hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3855 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3856 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3857 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3858 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3859 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3860 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3861 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3862 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3863 slwi r11,r21,3 ; Multiply space by 8
3864 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3865 addc r23,r23,r9 ; Relocate bottom half of vaddr
3866 lwz r10,0(r10) ; Get the actual translation map
3867 slwi r12,r21,2 ; Multiply space by 4
3868 add r10,r10,r11 ; Add in the higher part of the index
3869 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3870 adde r22,r22,r8 ; Relocate the top half of the vaddr
3871 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3872 bl sxlkUnlock ; Unlock the search list
3874 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
3875 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3876 cmplwi r28,0 ; Is the pmap paddr valid?
3877 bne+ hpfNest ; Nest into new pmap...
3878 b hpfBadPmap ; Handle bad pmap
3881 hpfGetPmap64: ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3882 cmpldi r28,0 ; Is the pmap paddr valid?
3883 bne++ hpfNest ; Nest into new pmap...
3884 b hpfBadPmap ; Handle bad pmap
3888 ; Error condition. We only allow 64 nestings. This keeps us from having to
3889 ; check for recursive nests when we install them.
3895 hpfNestTooMuch: lwz r20,savedsisr(r13) ; Get the DSISR
3896 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3897 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3898 ori r20,r20,1 ; Indicate that there was a nesting problem
3899 stw r20,savedsisr(r13) ; Stash it
3900 lwz r11,saveexception(r13) ; Restore the exception code
3901 b EXT(PFSExit) ; Yes... (probably not though)
3904 ; Error condition - lock failed - this is fatal
3910 hpfBadLock: lis r0,hi16(Choke) ; System abend
3911 ori r0,r0,lo16(Choke) ; System abend
3912 li r3,failMapping ; Show mapping failure
3916 ; Error condition - space id selected an invalid pmap - fatal
3922 hpfBadPmap: lis r0,hi16(Choke) ; System abend
3923 ori r0,r0,lo16(Choke) ; System abend
3924 li r3,failPmap ; Show invalid pmap
3928 ; Did not find any kind of mapping
3934 hpfNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3935 bl sxlkUnlock ; Unlock it
3936 lwz r11,saveexception(r13) ; Restore the exception code
3938 hpfExit: ; We need this because we can not do a relative branch
3939 b EXT(PFSExit) ; Yes... (probably not though)
3943 ; Here is where we handle special mappings. So far, the only use is to load a
3944 ; processor specific segment register for copy in/out handling.
3946 ; The only (so far implemented) special map is used for copyin/copyout.
3947 ; We keep a mapping of a "linkage" mapping in the per_proc.
3948 ; The linkage mapping is basically a nested pmap that is switched in
3949 ; as part of context switch. It relocates the appropriate user address
3950 ; space slice into the right place in the kernel.
3956 hpfSpclNest: la r31,ppUMWmp(r19) ; Just point to the mapping
3957 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
3958 b hpfCSrch ; Go continue search...
3962 ; We have now found a mapping for the address we faulted on.
3966 ; Here we go about calculating what the VSID should be. We concatenate
3967 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3968 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3969 ; Then we XOR the expanded space ID and the shifted vaddr. This gives us the VSID.
3972 ; This is used both for segment handling and PTE handling
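;
; Illustrative C model of the VSID generation (a sketch with simplified masking;
; the exact bit placement is shown in the layout diagrams and is done piecemeal
; by the rlwinm/rlwimi sequence below):
;
;     uint64_t make_vsid(uint64_t ea, uint32_t space_id) {
;         uint64_t sid  = space_id & 0x3FFFu;                /* 14-bit space ID           */
;         uint64_t sids = sid | (sid << 14) | (sid << 28);   /* three concatenated copies */
;         uint64_t seg  = (ea >> 28) << 14;                  /* vaddr 0:35 slid up, with a
;                                                               hole left for one copy    */
;         return sids ^ seg;                                 /* the VSID                  */
;     }
;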
3977 #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3980 ; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3981 ; when a multi-level mapping has been successfully searched):
3982 ; r21: home space id number
3983 ; r22: relocated high-order 32 bits of vaddr
3984 ; r23: relocated low-order 32 bits of vaddr
3985 ; r25: pmap physical address
3987 ; r28: home pmap physical address
3988 ; r29: high-order 32 bits of faulting vaddr
3989 ; r30: low-order 32 bits of faulting vaddr
3990 ; r31: mapping's physical address
3994 hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3995 hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3996 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3997 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3998 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3999 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
4000 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
4001 cmplwi cr5,r0,0 ; Did we just do a special nesting?
4002 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
4003 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
4004 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
4005 xor r14,r14,r20 ; Calculate the top half of VSID
4006 xor r15,r15,r21 ; Calculate the bottom half of the VSID
4007 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
4008 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
4009 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
4010 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
4011 or r12,r12,r15 ; Add key into the bottom of VSID
4013 ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4015 bne++ hpfPteMiss ; Nope, normal PTE miss...
4018 ; Here is the only place that we make an entry in the pmap segment cache.
4020 ; Note that we do not make an entry in the segment cache for special
4021 ; nested mappings. This makes the copy in/out segment get refreshed
4022 ; when switching threads.
4024 ; The first thing that we do is to look up the ESID we are going to load
4025 ; into a segment in the pmap cache. If it is already there, this is
4026 ; a segment that appeared since the last time we switched address spaces.
4027 ; If all is correct, then it was another processor that made the cache
4028 ; entry. If not, well, it is an error that we should die on, but I have
4029 ; not figured a good way to trap it yet.
4031 ; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4032 ; an entry based on the generation number, update the cache entry, and
4033 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4034 ; entries that correspond to the last 4 bits (32:35 for 64-bit and
4035 ; 0:3 for 32-bit) of the ESID.
4037 ; Then we unlock and bail.
4039 ; First lock it. Then select a free slot or steal one based on the generation
4040 ; number. Then store it, update the allocation flags, and unlock.
4042 ; The cache entry contains an image of the ESID/VSID pair we would load for
4043 ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4045 ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4046 ; the current one, which may have changed because we nested.
4048 ; Also remember that we do not store the valid bit in the ESID. If we
4049 ; do, this will break some other stuff.
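;
; Slot selection sketch (illustrative only). The pmapCCtl control word keeps one
; free/invalid bit per cache slot plus a generation counter; the cntlzw and mask
; arithmetic below amounts to:
;
;     int pick_slot(uint32_t free_bits, uint32_t generation) {
;         if (free_bits != 0)
;             return count_leading_zeros32(free_bits);       /* first free slot   */
;         return generation & (pmapSegCacheUse - 1);         /* none free: steal  */
;     }
;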
4052 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4054 mr r3,r25 ; Point to the pmap
4055 mr r4,r29 ; ESID high half
4056 mr r5,r30 ; ESID low half
4057 bl pmapCacheLookup ; Go see if this is in the cache already
4059 mr. r3,r3 ; Did we find it?
4060 mr r4,r11 ; Copy this to a different register
4062 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4064 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4065 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4067 cntlzw r7,r4 ; Find a free slot
4069 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4070 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4071 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4072 addi r5,r4,1 ; Bump the generation number
4073 and r7,r7,r6 ; Clear bit number if none empty
4074 andc r8,r4,r6 ; Clear generation count if we found an empty
4075 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4076 or r7,r7,r8 ; Select a slot number
4078 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4079 oris r8,r8,0x8000 ; Get the high bit on
4080 la r9,pmapSegCache(r25) ; Point to the segment cache
4081 slwi r6,r7,4 ; Get index into the segment cache
4082 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4083 srw r8,r8,r7 ; Get the mask
4084 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4086 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4087 oris r0,r0,0xF000 ; Get the sub-tag mask
4088 add r9,r9,r6 ; Point to the cache slot
4089 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4090 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4092 stw r29,sgcESID(r9) ; Save the top of the ESID
4093 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4094 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4095 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4096 or r10,r10,r5 ; Stick in subtag in case top half
4097 or r11,r11,r5 ; Stick in subtag in case bottom half
4098 stw r14,sgcVSID(r9) ; Save the top of the VSID
4099 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4100 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4101 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4103 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4104 b hpfNoCacheEnt ; Go finish up...
4107 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4111 eieio ; Make sure cache is updated before lock
4112 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4116 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4117 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4120 ; Make and enter 32-bit segment register
4123 lwz r16,validSegs(r19) ; Get the valid SR flags
4124 xor r12,r12,r4 ; Alter the storage key before loading segment register
4125 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4126 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4127 lis r0,0x8000 ; Set bit 0
4128 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4129 srw r0,r0,r2 ; Get bit corresponding to SR
4130 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4131 or r16,r16,r0 ; Show that SR is valid
4133 mtsrin r6,r30 ; Set the actual SR
4135 stw r16,validSegs(r19) ; Set the valid SR flags
4137 b hpfPteMiss ; SR loaded, go do a PTE...
4140 ; Make and enter 64-bit segment look-aside buffer entry.
4141 ; Note that the cache entry is the right format except for valid bit.
4142 ; We also need to convert from long long to 64-bit register values.
4149 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4150 sldi r8,r29,32 ; Move high order address over
4151 sldi r10,r14,32 ; Move high part of VSID over
4153 not r3,r16 ; Make valids be 0s
4154 li r0,1 ; Prepare to set bit 0
4156 cntlzd r17,r3 ; Find a free SLB
4157 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4158 or r9,r8,r30 ; Form full 64-bit address
4159 cmplwi r17,63 ; Did we find a free SLB entry?
4160 sldi r0,r0,63 ; Get bit 0 set
4161 or r10,r10,r12 ; Move in low part and keys
4162 addi r17,r17,1 ; Skip SLB 0 always
4163 blt++ hpfFreeSeg ; Yes, go load it...
4166 ; No free SLB entries, select one that is in use and invalidate it
4168 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4169 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4170 addi r4,r4,1 ; Set next slot to steal
4171 slbmfee r7,r17 ; Get the entry that is in the selected spot
4172 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4173 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4174 srawi r2,r2,31 ; Get -1 if steal index still in range
4175 slbie r7 ; Invalidate the in-use SLB entry
4176 and r4,r4,r2 ; Reset steal index when it should wrap
4179 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4181 ; We are now ready to stick the SLB entry in the SLB and mark it in use
4185 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4186 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4187 srd r0,r0,r4 ; Set bit mask for allocation
4188 oris r9,r9,0x0800 ; Turn on the valid bit
4189 or r16,r16,r0 ; Turn on the allocation flag
4190 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4192 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4193 slbie r7 ; Blow away a potential duplicate
4195 hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4197 std r16,validSegs(r19) ; Mark as valid
4198 b hpfPteMiss ; STE loaded, go do a PTE...
4201 ; The segment has been set up and loaded if need be. Now we are ready to build the
4202 ; PTE and get it into the hash table.
4204 ; Note that there is actually a race here. If we start fault processing on
4205 ; a different pmap, i.e., we have descended into a nested pmap, it is possible
4206 ; that the nest could have been removed from the original pmap. We would
4207 ; succeed with this translation anyway. I do not think we need to worry
4208 ; about this (famous last words) because nobody should be unnesting anything
4209 ; if there are still people actively using them. It should be up to the
4210 ; higher level VM system to put the kibosh on this.
4212 ; There is also another race here: if we fault on the same mapping on more than
4213 ; one processor at the same time, we could end up with multiple PTEs for the same
4214 ; mapping. This is not a good thing.... We really only need one of the
4215 ; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4216 ; the mapping. If we see that set, we just abandon the handler and hope that by
4217 ; the time we restore context and restart the interrupted code, the fault has
4218 ; been resolved by the other guy. If not, we will take another fault.
4222 ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4223 ; It is required to stay there until after we call mapSelSlot!!!!
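;
; Illustrative C model of the claim made below (a sketch; compare_and_swap
; stands in for the lwarx/stwcx. pair on the mapping's flag word):
;
;     /* returns 1 if we now own the fault, 0 if another processor is (or
;        already has) handled it, or a PTE is already present */
;     int claim_fault(volatile uint32_t *mpFlags, uint32_t ptePtr) {
;         uint32_t old;
;         do {
;             old = *mpFlags;                                /* lwarx   */
;             if ((old & mpFIP) || (ptePtr & mpHValid))
;                 return 0;                                  /* abandon */
;         } while (!compare_and_swap(mpFlags, old, old | mpFIP));    /* stwcx. */
;         return 1;
;     }
;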
4228 hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4229 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4230 li r3,mpHValid ; Get the PTE valid bit
4231 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4232 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4233 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4234 and. r12,r12,r3 ; Isolate the valid bit
4235 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4236 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
4237 rlwinm r0,r2,0,mpType ; Isolate mapping type
4238 cmplwi r0,mpBlock ; Is this a block mapping?
4239 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
4240 stwcx. r2,0,r31 ; Store the flags
4241 bne-- hpfPteMiss ; Collision, try again...
4243 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4246 ; At this point we are about to do the 32-bit PTE generation.
4248 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4252 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4253 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4254 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4256 ; The 24 bits of the 32-bit architecture VSID is in the following:
4260 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4261 ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4262 ; +--------+--------+--------+--------+--------+--------+--------+--------+
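;
; Illustrative C sketch of the 32-bit primary hash computed below (simplified;
; "vsid" is the 24-bit VSID shown above, "ea" the faulting address, and
; "hash_mask" the table size mask taken from SDR1):
;
;     uint32_t page_index  = (ea >> 12) & 0xFFFF;            /* EA page index       */
;     uint32_t hash        = (vsid & 0x7FFFF) ^ page_index;  /* primary hash        */
;     uint32_t pteg_offset = (hash << 6) & hash_mask;        /* 64-byte PTEG groups */
;     /* PTEG address = hash table base + pteg_offset; the matching PCA entry sits
;        at a negative offset below the hash table base, as computed below */
;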
4267 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4268 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4270 mfsdr1 r27 ; Get the hash table base address
4272 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4273 rlwinm r18,r23,10,26,31 ; Extract the API
4274 xor r19,r15,r0 ; Calculate hash << 12
4275 mr r2,r25 ; Save the flag part of the mapping
4276 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4277 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4278 rlwinm r25,r25,0,0,19 ; Clear out the flags
4279 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4280 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4281 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4282 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4283 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4284 add r24,r24,r25 ; Adjust to true physical address
4285 rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image
4286 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4287 and r19,r19,r16 ; Wrap hash table offset into the hash table
4288 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4289 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4290 add r19,r19,r27 ; Point to the PTEG
4291 subfic r20,r20,-4 ; Get negative offset to PCA
4292 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4293 add r20,r20,r27 ; Point to the PCA slot
4296 ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4297 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4299 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4300 ; that some other processor beat us and stuck in a PTE or that
4301 ; all we had was a simple segment exception and the PTE was there the whole time.
4302 ; If we find one, we are done.
4305 mr r7,r20 ; Copy the PCA pointer
4306 bl mapLockPteg ; Lock the PTEG
4308 lwz r12,mpPte(r31) ; Get the offset to the PTE
4309 mr r17,r6 ; Remember the PCA image
4310 mr r16,r6 ; Prime the post-select PCA image
4311 andi. r0,r12,mpHValid ; Is there a PTE here already?
4312 li r21,8 ; Get the number of slots
4314 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4316 bne- hpfBailOut ; Someone already did this for us...
4319 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4320 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4321 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4322 ; R4 returns the slot index.
4324 ; REMEMBER: CR7 indicates that we are building a block mapping.
4327 hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4328 mr r6,r17 ; Get back the original PCA
4329 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4330 blt- hpfBailOut ; Holy Cow, all slots are locked...
4332 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4334 cmplwi cr5,r3,1 ; Did we steal a slot?
4335 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4336 mr r16,r6 ; Remember the PCA image after selection
4337 blt+ cr5,hpfInser32 ; Nope, no steal...
4339 lwz r6,0(r19) ; Get the old PTE
4340 lwz r7,4(r19) ; Get the real part of the stealee
4341 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4342 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4343 srwi r3,r7,12 ; Change phys address to a ppnum
4344 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4345 cmplwi cr1,r3,0 ; Check if this is in RAM
4346 bne- hpfNoPte32 ; Could not get it, try for another...
4348 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4350 hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4352 sync ; Make sure the invalid is stored
4353 li r9,tlbieLock ; Get the TLBIE lock
4354 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4356 hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4357 mfsprg r4,0 ; Get the per_proc
4358 rlwinm r8,r6,25,18,31 ; Extract the space ID
4359 rlwinm r11,r6,25,18,31 ; Extract the space ID
4360 lwz r7,hwSteals(r4) ; Get the steal count
4361 srwi r2,r6,7 ; Align segment number with hash
4362 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4363 mr. r0,r0 ; Is it locked?
4364 srwi r0,r19,6 ; Align PTEG offset for back hash
4365 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4366 xor r11,r11,r0 ; Hash backwards to partial vaddr
4367 rlwinm r12,r2,14,0,3 ; Shift segment up
4368 mfsprg r2,2 ; Get feature flags
4369 li r0,1 ; Get our lock word
4370 rlwimi r12,r6,22,4,9 ; Move up the API
4371 bne- hpfTLBIE32 ; It is locked, go wait...
4372 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4374 stwcx. r0,0,r9 ; Try to get it
4375 bne- hpfTLBIE32 ; We was beat...
4376 addi r7,r7,1 ; Bump the steal count
4378 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4379 li r0,0 ; Lock clear value
4381 tlbie r12 ; Invalidate it everywhere
4384 beq- hpfNoTS32 ; Can not have MP on this machine...
4386 eieio ; Make sure that the tlbie happens first
4387 tlbsync ; Wait for everyone to catch up
4388 sync ; Make sure of it all
4390 hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4392 stw r7,hwSteals(r4) ; Save the steal count
4393 bgt cr5,hpfInser32 ; We just stole a block mapping...
4395 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4397 la r11,ppLink+4(r3) ; Point to the master RC copy
4398 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4399 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4401 hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4402 or r0,r0,r2 ; Merge in the new RC
4403 stwcx. r0,0,r11 ; Try to stick it back
4404 bne- hpfMrgRC32 ; Try again if we collided...
4407 hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4408 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4410 lhz r10,mpSpace(r7) ; Get the space
4411 lwz r9,mpVAddr+4(r7) ; And the vaddr
4412 cmplw cr1,r10,r8 ; Is this one of ours?
4413 xor r9,r12,r9 ; Compare virtual address
4414 cmplwi r9,0x1000 ; See if we really match
4415 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4416 beq+ hpfFPnch2 ; Yes, found ours...
4418 lwz r7,mpAlias+4(r7) ; Chain on to the next
4419 b hpfFPnch ; Check it out...
4421 hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4422 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4423 bl mapPhysUnlock ; Unlock the physent now
4425 hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4427 stw r24,4(r19) ; Stuff in the real part of the PTE
4428 eieio ; Make sure this gets there first
4430 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4431 mr r17,r16 ; Get the PCA image to save
4432 b hpfFinish ; Go join the common exit code...
4436 ; At this point we are about to do the 64-bit PTE generation.
4438 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4442 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4443 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4444 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4451 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4452 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4454 mfsdr1 r27 ; Get the hash table base address
4456 sldi r11,r22,32 ; Slide top of adjusted EA over
4457 sldi r14,r14,32 ; Slide top of VSID over
4458 rlwinm r5,r27,0,27,31 ; Isolate the size
4459 eqv r16,r16,r16 ; Get all foxes here
4460 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4461 mr r2,r10 ; Save the flag part of the mapping
4462 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4463 rldicr r27,r27,0,45 ; Clean up the hash table base
4464 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4465 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4466 subfic r5,r5,46 ; Get number of leading zeros
4467 xor r19,r0,r15 ; Calculate hash
4468 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4469 srd r16,r16,r5 ; Shift over to get length of table
4470 srdi r19,r19,5 ; Convert page offset to hash table offset
4471 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4472 rldicr r10,r10,0,51 ; Clear out flags
4473 sldi r24,r24,12 ; Change ppnum to physical address
4474 sub r11,r11,r10 ; Get the offset from the base mapping
4475 and r19,r19,r16 ; Wrap into hash table
4476 add r24,r24,r11 ; Get actual physical address of this page
4477 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4478 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4479 subfic r20,r20,-4 ; Get negative offset to PCA
4480 ori r24,r24,lo16(mpR) ; Force on the reference bit
4481 add r20,r20,r27 ; Point to the PCA slot
4482 add r19,r19,r27 ; Point to the PTEG
4485 ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4486 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4488 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4489 ; that some other processor beat us and stuck in a PTE or that
4490 ; all we had was a simple segment exception and the PTE was there the whole time.
4491 ; If we find one, we are done.
4494 mr r7,r20 ; Copy the PCA pointer
4495 bl mapLockPteg ; Lock the PTEG
4497 lwz r12,mpPte(r31) ; Get the offset to the PTE
4498 mr r17,r6 ; Remember the PCA image
4499 mr r18,r6 ; Prime post-selection PCA image
4500 andi. r0,r12,mpHValid ; See if we have a PTE now
4501 li r21,8 ; Get the number of slots
4503 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4505 bne-- hpfBailOut ; Someone already did this for us...
4508 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
4509 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4510 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4511 ; R4 returns the slot index.
4513 ; REMEMBER: CR7 indicates that we are building a block mapping.
4516 hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4517 mr r6,r17 ; Restore original state of PCA
4518 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4519 blt- hpfBailOut ; Holy Cow, all slots are locked...
4521 bl mapSelSlot ; Go select a slot
4523 cmplwi cr5,r3,1 ; Did we steal a slot?
4524 mr r18,r6 ; Remember the PCA image after selection
4525 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4526 lwz r10,hwSteals(r2) ; Get the steal count
4527 blt++ cr5,hpfInser64 ; Nope, no steal...
4529 ld r6,0(r19) ; Get the old PTE
4530 ld r7,8(r19) ; Get the real part of the stealee
4531 rldicr r6,r6,0,62 ; Clear the valid bit
4532 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4533 srdi r3,r7,12 ; Change phys address to a ppnum
4534 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4535 cmplwi cr1,r3,0 ; Check if this is in RAM
4536 bne-- hpfNoPte64 ; Could not get it, try for another...
4538 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4540 hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4541 li r9,tlbieLock ; Get the TLBIE lock
4543 srdi r11,r6,5 ; Shift VSID over for back hash
4544 mfsprg r4,0 ; Get the per_proc
4545 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4546 sync ; Make sure the invalid is stored
4548 sldi r12,r6,16 ; Move AVPN to EA position
4549 sldi r11,r11,5 ; Move this to the page position
4551 hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4552 mr. r0,r0 ; Is it locked?
4553 li r0,1 ; Get our lock word
4554 bne-- hpfTLBIE65 ; It is locked, go wait...
4556 stwcx. r0,0,r9 ; Try to get it
4557 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4558 rldicl r8,r6,52,50 ; Isolate the address space ID
4559 bne-- hpfTLBIE64 ; We was beat...
4560 addi r10,r10,1 ; Bump the steal count
4562 rldicl r11,r12,0,16 ; Clear cause the book says so
4563 li r0,0 ; Lock clear value
4565 tlbie r11 ; Invalidate it everywhere
4567 mr r7,r8 ; Get a copy of the space ID
4568 eieio ; Make sure that the tlbie happens first
4569 rldimi r7,r7,14,36 ; Copy address space to make hash value
4570 tlbsync ; Wait for everyone to catch up
4571 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4572 srdi r2,r6,26 ; Shift original segment down to bottom
4574 ptesync ; Make sure of it all
4575 xor r7,r7,r2 ; Compute original segment
4576 stw r0,tlbieLock(0) ; Clear the tlbie lock
4578 stw r10,hwSteals(r4) ; Save the steal count
4579 bgt cr5,hpfInser64 ; We just stole a block mapping...
4581 rldimi r12,r7,28,0 ; Insert decoded segment
4582 rldicl r4,r12,0,13 ; Trim to max supported address
4584 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4586 la r11,ppLink+4(r3) ; Point to the master RC copy
4587 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4588 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4590 hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4591 li r12,ppLFAmask ; Get mask to clean up alias pointer
4592 or r0,r0,r2 ; Merge in the new RC
4593 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4594 stwcx. r0,0,r11 ; Try to stick it back
4595 bne-- hpfMrgRC64 ; Try again if we collided...
4597 hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4598 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4600 lhz r10,mpSpace(r7) ; Get the space
4601 ld r9,mpVAddr(r7) ; And the vaddr
4602 cmplw cr1,r10,r8 ; Is this one of ours?
4603 xor r9,r4,r9 ; Compare virtual address
4604 cmpldi r9,0x1000 ; See if we really match
4605 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4606 beq++ hpfFPnch2x ; Yes, found ours...
4608 ld r7,mpAlias(r7) ; Chain on to the next
4609 b hpfFPnchx ; Check it out...
4613 hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4614 stwcx. r7,0,r7 ; Kill reservation
4616 hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4617 mr. r0,r0 ; Is it locked?
4618 beq++ hpfTLBIE64 ; Not locked, go try to grab it...
4619 b hpfTLBIE63 ; Still locked, keep checking...
4623 hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4624 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4625 bl mapPhysUnlock ; Unlock the physent now
4628 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4629 eieio ; Make sure this gets there first
4630 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4631 mr r17,r18 ; Get the PCA image to set
4632 b hpfFinish ; Go join the common exit code...
4635 hpfLostPhys: lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4636 ori r0,r0,lo16(Choke) ; System abend
4640 ; This is the common code we execute when we are finished setting up the PTE.
4645 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4646 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4647 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4648 stw r4,mpPte(r31) ; Remember our PTE
4650 hpfBailOut: eieio ; Make sure all updates come first
4651 stw r17,0(r20) ; Unlock and set the final PCA
4654 ; This is where we go if we have started processing the fault, but find that someone
4655 ; else has taken care of it.
4658 hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4659 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4660 sth r2,mpFlags+2(r31) ; Set it
4662 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4663 bl sxlkUnlock ; Unlock the search list
4665 li r11,T_IN_VAIN ; Say that it was handled
4666 b EXT(PFSExit) ; Leave...
4669 ; This is where we go when we find that someone else
4670 ; is in the process of handling the fault.
4673 hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4674 stwcx. r3,0,r3 ; Do it
4676 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4677 bl sxlkUnlock ; Unlock the search list
4679 li r11,T_IN_VAIN ; Say that it was handled
4680 b EXT(PFSExit) ; Leave...
4683 ; Guest shadow assist -- page fault handler
4685 ; Here we handle a fault in a guest pmap that has the guest shadow mapping
4686 ; assist active. We locate the VMM pmap extension block, which contains an
4687 ; index over the discontiguous multi-page shadow hash table. The index
4688 ; corresponding to our vaddr is selected, and the selected group within
4689 ; that page is searched for a valid and active entry that contains
4690 ; our vaddr and space id. The search is pipelined, so that we may fetch
4691 ; the next slot while examining the current slot for a hit. The final
4692 ; search iteration is unrolled so that we don't fetch beyond the end of
4693 ; our group, which could have dire consequences depending upon where the
4694 ; physical hash page is located.
4696 ; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4697 ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4698 ; after the pmap_vmm_ext is the hash table physical address index, a
4699 ; linear list of 64-bit physical addresses of the pages that comprise the shadow hash table.
4702 ; In the event that we successfully locate a guest mapping, we re-join
4703 ; the page fault path at hpfGVfound with the mapping's address in r31;
4704 ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4705 ; a share of the pmap search lock for the host pmap with the host pmap's
4706 ; address in r28, the guest pmap's space id in r21, and the guest pmap's flags in r12.
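;
; Illustrative C sketch of the hash and group selection (simplified;
; groups_per_page, hash_pages and group_size stand in for the GV_* constants
; used by the assembly):
;
;     uint32_t hash       = space_id ^ (uint32_t)(guest_va >> 12);   /* shadow hash */
;     uint64_t page_paddr = hash_page_index[(hash / groups_per_page) % hash_pages];
;     uint64_t group      = page_paddr + (hash % groups_per_page) * group_size;
;     /* the group holds GV_SLOTS mapping slots, which are searched linearly below */
;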
4712 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4714 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4715 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4716 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4717 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4718 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4719 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4720 lwz r6,vxsGpf(r11) ; Get guest fault count
4722 srwi r3,r10,12 ; Form shadow hash:
4723 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4724 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4725 ; Form index offset from hash page number
4726 add r31,r31,r4 ; r31 <- hash page index entry
4727 lwz r31,4(r31) ; r31 <- hash page paddr
4728 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4729 ; r31 <- hash group paddr
4731 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4732 bl sxlkShared ; Go get a shared lock on the mapping lists
4733 mr. r3,r3 ; Did we get the lock?
4734 bne- hpfBadLock ; Nope...
4736 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4737 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4738 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4739 addi r6,r6,1 ; Increment guest fault count
4740 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4741 mtctr r0 ; in this group
4742 stw r6,vxsGpf(r11) ; Update guest fault count
4747 hpfGVlp32: mr r6,r3 ; r6 <- current mapping slot's flags
4748 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4749 mr r7,r4 ; r7 <- current mapping slot's space ID
4750 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4751 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4752 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4753 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4754 xor r7,r7,r21 ; Compare space ID
4755 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4756 xor r8,r8,r10 ; Compare virtual address
4757 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4758 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4760 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4761 bdnz hpfGVlp32 ; Iterate
4763 clrrwi r5,r5,12 ; Remove flags from virtual address
4764 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4765 xor r4,r4,r21 ; Compare space ID
4766 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4767 xor r5,r5,r10 ; Compare virtual address
4768 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4769 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4775 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4776 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4777 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4778 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4779 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4780 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4781 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4782 lwz r6,vxsGpf(r11) ; Get guest fault count
4784 srwi r3,r10,12 ; Form shadow hash:
4785 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4786 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4787 ; Form index offset from hash page number
4788 add r31,r31,r4 ; r31 <- hash page index entry
4789 ld r31,0(r31) ; r31 <- hash page paddr
4790 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4791 ; r31 <- hash group paddr
4793 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4794 bl sxlkShared ; Go get a shared lock on the mapping lists
4795 mr. r3,r3 ; Did we get the lock?
4796 bne-- hpfBadLock ; Nope...
4798 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4799 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4800 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4801 addi r6,r6,1 ; Increment guest fault count
4802 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4803 mtctr r0 ; in this group
4804 stw r6,vxsGpf(r11) ; Update guest fault count
4809 mr r6,r3 ; r6 <- current mapping slot's flags
4810 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4811 mr r7,r4 ; r7 <- current mapping slot's space ID
4812 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4813 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4814 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4815 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4816 xor r7,r7,r21 ; Compare space ID
4817 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4818 xor r8,r8,r10 ; Compare virtual address
4819 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4820 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4822 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4823 bdnz hpfGVlp64 ; Iterate
4825 clrrdi r5,r5,12 ; Remove flags from virtual address
4826 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4827 xor r4,r4,r21 ; Compare space ID
4828 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4829 xor r5,r5,r10 ; Compare virtual address
4830 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4831 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4834 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4835 addi r6,r6,1 ; Increment miss count
4836 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
4840 * hw_set_user_space(pmap)
4841 * hw_set_user_space_dis(pmap)
4843 * Indicate whether memory space needs to be switched.
4844 * We really need to turn off interrupts here, because we need to be non-preemptable
4846 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4847 * register usage here. The VMM switch code in vmachmon.s that calls this
4848 * knows what registers are in use. Check that code if these change.
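 *
 * In rough C terms the body below does something like this (a sketch, illustrative only;
 * the per_proc fields are the ones stored below, the helper names are assumptions):
 *
 *	disable_ee_fp_vec();				// mtmsr with EE/FP/VEC cleared
 *	same = (pmap == per_proc->ppUserPmapVirt);
 *	per_proc->ppUserPmap     = pmap ^ pmap->pmapvr;	// virtual -> real conversion salt
 *	per_proc->ppUserPmapVirt = pmap;
 *	restore_msr();
 *	if (!same && cpu_has_altivec)
 *		dssall();				// kill data streams on a space switch
 *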
4854 .globl EXT(hw_set_user_space)
4856 LEXT(hw_set_user_space)
4858 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4859 mfmsr r10 ; Get the current MSR
4860 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4861 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4862 andc r10,r10,r8 ; Turn off VEC, FP for good
4863 andc r9,r10,r9 ; Turn off EE also
4864 mtmsr r9 ; Disable them
4865 isync ; Make sure FP and vec are off
4866 mfsprg r6,1 ; Get the current activation
4867 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4868 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4869 mfsprg r4,2 ; Get the feature flags
4870 lwz r7,pmapvr(r3) ; Get the v to r translation
4871 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4872 mtcrf 0x80,r4 ; Get the Altivec flag
4873 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4874 cmplw cr1,r3,r2 ; Same address space as before?
4875 stw r7,ppUserPmap(r6) ; Show our real pmap address
4876 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4877 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4878 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4879 mtmsr r10 ; Restore interruptions
4880 beqlr-- cr1 ; Leave if the same address space or not Altivec
4882 dssall ; Need to kill all data streams if adrsp changed
4887 .globl EXT(hw_set_user_space_dis)
4889 LEXT(hw_set_user_space_dis)
4891 lwz r7,pmapvr(r3) ; Get the v to r translation
4892 mfsprg r4,2 ; Get the feature flags
4893 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4894 mfsprg r6,1 ; Get the current activation
4895 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4896 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4897 mtcrf 0x80,r4 ; Get the Altivec flag
4898 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4899 cmplw cr1,r3,r2 ; Same address space as before?
4900 stw r7,ppUserPmap(r6) ; Show our real pmap address
4901 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4902 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4903 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4904 beqlr-- cr1 ; Leave if the same
4906 dssall ; Need to kill all data streams if adrsp changed
4910 /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4912 * Lock must already be held on mapping block list
4913 * returns 0 if all slots filled.
4914 * returns n if a slot is found and it is not the last
4915 * returns -n if a slot is found and it is the last
4916 * when n and -n are returned, the corresponding bit is cleared
4917 * the mapping is zeroed out before return
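 *
 * Roughly equivalent C, for illustration only (the two 32-bit mbfree words are treated
 * as one 64-bit bitmap; 64 bytes is the slot size implied by the dcbz below;
 * count_leading_zeros64() and the other helpers are assumed, not real kernel routines):
 *
 *	int mapalc1(struct mappingblok *mb) {
 *		int n = count_leading_zeros64(free_bits(mb));	// first set bit = first free slot
 *		if (n == 64) return 0;				// all slots filled
 *		clear_free_bit(mb, n);				// claim the slot
 *		bzero((char *)mb + (n << 6), 64);		// zero the 64-byte mapping
 *		return any_free_bits_left(mb) ? n : -n;		// negative if that was the last one
 *	}
 *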
4925 lwz r4,mbfree(r3) ; Get the 1st mask
4926 lis r0,0x8000 ; Get the mask to clear the first free bit
4927 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4928 mr r12,r3 ; Save the block ptr
4929 cntlzw r3,r4 ; Get first 1-bit in 1st word
4930 srw. r9,r0,r3 ; Get bit corresponding to first free one
4931 cntlzw r10,r5 ; Get first free field in second word
4932 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4933 bne mapalc1f ; Found one in 1st word
4935 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4936 li r3,0 ; assume failure return
4937 andc r5,r5,r9 ; Turn it off
4938 beqlr-- ; There are no 1 bits left...
4939 addi r3,r10,32 ; set the correct number
4942 or. r0,r4,r5 ; any more bits set?
4943 stw r4,mbfree(r12) ; update bitmasks
4944 stw r5,mbfree+4(r12)
4946 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4948 dcbz r6,r12 ; clear the 64-byte mapping
4951 bnelr++ ; return if another bit remains set
4953 neg r3,r3 ; indicate we just returned the last bit
4957 /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4959 * Lock must already be held on mapping block list
4960 * returns 0 if all slots filled.
4961 * returns n if a slot is found and it is not the last
4962 * returns -n if a slot is found and it is the last
4963 * when n and -n are returned, the corresponding bits are cleared
4964 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4965 * the mapping is zeroed out before return
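 *
 * A bit position starts a run of two consecutive 1 bits in n exactly when the corresponding
 * bit of (n & (n << 1)) is set, so cntlzw of that value locates the first free pair.
 * Rough C, for illustration only (64-bit view of the two mbfree words; helpers assumed):
 *
 *	int mapalc2(struct mappingblok *mb) {
 *		uint64_t f = free_bits(mb);
 *		int n = count_leading_zeros64(f & (f << 1));	// first free adjacent pair
 *		if (n == 64) return 0;				// no pair free
 *		clear_free_bits(mb, n, 2);			// claim both slots
 *		bzero((char *)mb + (n << 6), 128);		// zero the 128-byte mapping
 *		return any_free_bits_left(mb) ? n : -n;
 *	}
 *
 * The assembly works on two 32-bit words, so the pair that straddles the word boundary
 * (bit 31 of the first word plus bit 0 of the second) is handled separately at mapalc2c.
 *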
4971 lwz r4,mbfree(r3) ; Get the first mask
4972 lis r0,0x8000 ; Get the mask to clear the first free bit
4973 lwz r5,mbfree+4(r3) ; Get the second mask
4974 mr r12,r3 ; Save the block ptr
4975 slwi r6,r4,1 ; shift first word over
4976 and r6,r4,r6 ; lite start of double bit runs in 1st word
4977 slwi r7,r5,1 ; shift 2nd word over
4978 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4979 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4980 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4981 cntlzw r10,r7 ; Get first free field in second word
4982 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4983 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4984 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4985 bne mapalc2a ; Found two consecutive free bits in 1st word
4987 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4988 li r3,0 ; assume failure
4989 srwi r11,r9,1 ; get mask for 2nd bit
4990 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4991 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4992 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4993 addi r3,r10,32 ; set the correct number
4996 or. r0,r4,r5 ; any more bits set?
4997 stw r4,mbfree(r12) ; update bitmasks
4998 stw r5,mbfree+4(r12)
4999 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
5003 dcbz r6,r12 ; zero out the 128-byte mapping
5004 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
5005 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
5008 bnelr++ ; return if another bit remains set
5010 neg r3,r3 ; indicate we just returned the last bit
5014 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5015 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5016 beqlr ; no, we failed
5017 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5018 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5019 li r3,31 ; get index of this field
5024 ; This routine initializes the hash table and PCA.
5025 ; It is done here because we may need to be 64-bit to do it.
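;
; In outline (a C sketch, illustrative only; hash_table_base/hash_table_size are the
; globals read below, and the PCA grows downward from the table base):
;
;	for (off = 0; off < hash_table_size; off += line)		/* 32- or 128-byte lines */
;		zero_cache_line(hash_table_base + off);			/* dcbz / dcbz128 */
;	pca_bytes = (hash_table_size / PTEG_size) * 4;			/* 4 bytes per PCA entry */
;	for (off = 4; off <= pca_bytes; off += 4)
;		*(uint32_t *)(hash_table_base - off) = 0xFF010000;	/* all slots free, steal at end */
;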
5029 .globl EXT(hw_hash_init)
5033 mfsprg r10,2 ; Get feature flags
5034 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5035 mtcrf 0x02,r10 ; move pf64Bit to cr6
5036 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5037 lis r4,0xFF01 ; Set all slots free and start steal at end
5038 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5039 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5041 lwz r12,0(r12) ; Get hash table size
5043 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5045 lwz r11,4(r11) ; Get hash table base
5047 hhiNext32: cmplw r3,r12 ; Have we reached the end?
5048 bge- hhiCPCA32 ; Yes...
5049 dcbz r3,r11 ; Clear the line
5050 addi r3,r3,32 ; Next one...
5051 b hhiNext32 ; Go on...
5053 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5054 li r3,-4 ; Displacement to first PCA entry
5055 neg r12,r12 ; Get negative end of PCA
5057 hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5058 subi r3,r3,4 ; Next slot
5059 cmpw r3,r12 ; Have we finished?
5060 bge+ hhiNPCA32 ; Not yet...
5063 hhiSF: mfmsr r9 ; Save the MSR
5065 mr r0,r9 ; Get a copy of the MSR
5066 ld r11,0(r11) ; Get hash table base
5067 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5068 mtmsrd r0 ; Turn on SF
5072 hhiNext64: cmpld r3,r12 ; Have we reached the end?
5073 bge-- hhiCPCA64 ; Yes...
5074 dcbz128 r3,r11 ; Clear the line
5075 addi r3,r3,128 ; Next one...
5076 b hhiNext64 ; Go on...
5078 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5079 li r3,-4 ; Displacement to first PCA entry
5080 neg r12,r12 ; Get negative end of PCA
5082 hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5083 subi r3,r3,4 ; Next slot
5084 cmpd r3,r12 ; Have we finished?
5085 bge++ hhiNPCA64 ; Not yet...
5087 mtmsrd r9 ; Restore the original MSR (turning SF back off)
5093 ; This routine sets up the hardware to start translation.
5094 ; Note that we do NOT start translation.
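;
; For the 32-bit case below, the SDR1 image is built roughly like this (a sketch,
; illustrative only; the field placement follows the rlwimi that merges size into base):
;
;	uint32_t sdr1 = (uint32_t)hash_table_base			/* HTABORG */
;		      | (((hash_table_size - 1) >> 16) & 0x1FF);	/* HTABMASK (power-of-two size) */
;	mtsdr1(sdr1);
;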
5098 .globl EXT(hw_setup_trans)
5100 LEXT(hw_setup_trans)
5102 mfsprg r11,0 ; Get the per_proc block
5103 mfsprg r12,2 ; Get feature flags
5106 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5107 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5108 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5109 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5110 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5112 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5114 li r9,0 ; Clear out a register
5117 mtdbatu 0,r9 ; Invalidate maps
5118 mtdbatl 0,r9 ; Invalidate maps
5119 mtdbatu 1,r9 ; Invalidate maps
5120 mtdbatl 1,r9 ; Invalidate maps
5121 mtdbatu 2,r9 ; Invalidate maps
5122 mtdbatl 2,r9 ; Invalidate maps
5123 mtdbatu 3,r9 ; Invalidate maps
5124 mtdbatl 3,r9 ; Invalidate maps
5126 mtibatu 0,r9 ; Invalidate maps
5127 mtibatl 0,r9 ; Invalidate maps
5128 mtibatu 1,r9 ; Invalidate maps
5129 mtibatl 1,r9 ; Invalidate maps
5130 mtibatu 2,r9 ; Invalidate maps
5131 mtibatl 2,r9 ; Invalidate maps
5132 mtibatu 3,r9 ; Invalidate maps
5133 mtibatl 3,r9 ; Invalidate maps
5135 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5136 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5137 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5138 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5139 lwz r11,4(r11) ; Get hash table base
5140 lwz r12,0(r12) ; Get hash table size
5141 subi r12,r12,1 ; Back off by 1
5142 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5144 mtsdr1 r11 ; Ok, we now have the hash table set up
5147 li r12,invalSpace ; Get the invalid segment value
5148 li r10,0 ; Start low
5150 hstsetsr: mtsrin r12,r10 ; Set the SR
5151 addis r10,r10,0x1000 ; Bump the segment
5152 mr. r10,r10 ; Are we finished?
5153 bne+ hstsetsr ; Nope...
5161 hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5162 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5163 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5164 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5165 ld r11,0(r11) ; Get hash table base
5166 lwz r12,0(r12) ; Get hash table size
5167 cntlzw r10,r12 ; Get the number of bits
5168 subfic r10,r10,13 ; Get the extra bits we need
5169 or r11,r11,r10 ; Add the size field to SDR1
5171 mtsdr1 r11 ; Ok, we now have the hash table set up
5174 li r0,0 ; Set an SLB slot index of 0
5175 slbia ; Trash all SLB entries (except for entry 0 that is)
5176 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5177 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5178 slbie r7 ; Invalidate it
5184 ; This routine turns on translation for the first time on a processor
5188 .globl EXT(hw_start_trans)
5190 LEXT(hw_start_trans)
5193 mfmsr r10 ; Get the msr
5194 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5196 mtmsr r10 ; Everything falls apart here
5204 ; This routine validates a segment register.
5205 ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5208 ; r4 = segment[0:31]
5209 ; r5 = segment[32:63]
5213 ; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5214 ; Note that there is no reason to apply the key modifier here because this is only
5215 ; used for kernel accesses.
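;
; In outline (a sketch, illustrative only; the bit-level VSID construction is pictured below,
; and the helper names here are assumptions):
;
;	vsid = space_id_hash(pmap->pmapSpace) ^ shifted_esid(va);	/* see the r2/r8 pictures */
;	if (is_64bit_machine())
;		load_slb_entry(seg, vsid | key_noex_bits);		/* steal a used slot if none free */
;	else
;		mtsrin(vsid | key_noex_bits, va);			/* one of the 16 segment registers */
;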
5219 .globl EXT(hw_map_seg)
5223 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5224 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5225 mfsprg r10,2 ; Get feature flags
5228 ; Note: the following code would probably be easier to follow if I split it,
5229 ; but I just wanted to see if I could write this to work on both 32- and 64-bit
5230 ; machines combined.
5234 ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5235 ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5237 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5238 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5239 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5240 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5241 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5242 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5243 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5244 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5245 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5246 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5248 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5249 ; concatenated together. There is garbage
5250 ; at the top for 64-bit but we will clean that up later
5252 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5256 ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5257 ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5261 ; What we have now is:
5264 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5265 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5266 ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5267 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5268 ; 0 0 1 2 3 - for 32-bit machines
5272 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5273 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5274 ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5275 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5276 ; 0 0 1 2 3 - for 32-bit machines
5280 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5281 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5282 ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5283 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5284 ; 0 0 1 2 3 - for 32-bit machines
5288 xor r8,r8,r2 ; Calculate VSID
5290 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5291 mfsprg r12,0 ; Get the per_proc
5292 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5293 mfmsr r6 ; Get current MSR
5294 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5295 mtmsrd r0,1 ; Set only the EE bit to 0
5296 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5297 mfmsr r11 ; Get the MSR right now, after disabling EE
5298 andc r2,r11,r2 ; Turn off translation now
5299 rldimi r2,r0,63,0 ; Turn the 64-bit mode (SF) bit on
5300 or r11,r11,r6 ; Turn on the EE bit if it was on
5301 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5302 isync ; Hang out a bit
5304 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5305 sldi r9,r9,9 ; Position the key and noex bit
5307 rldimi r5,r8,12,0 ; Form the VSID/key
5309 not r3,r6 ; Make valids be 0s
5311 cntlzd r7,r3 ; Find a free SLB
5312 cmplwi r7,63 ; Did we find a free SLB entry?
5314 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5316 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5317 addi r7,r7,1 ; Make sure we skip slb 0
5318 blt++ hmsFreeSeg ; Yes, go load it...
5321 ; No free SLB entries, select one that is in use and invalidate it
5323 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5324 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5325 addi r2,r2,1 ; Set next slot to steal
5326 slbmfee r3,r7 ; Get the entry that is in the selected spot
5327 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5328 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5329 srawi r8,r8,31 ; Get -1 if steal index still in range
5330 slbie r3 ; Invalidate the in-use SLB entry
5331 and r2,r2,r8 ; Reset steal index when it should wrap
5334 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5336 ; We are now ready to stick the SLB entry in the SLB and mark it in use
5339 hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5340 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5341 srd r0,r0,r2 ; Set bit mask for allocation
5342 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5343 or r6,r6,r0 ; Turn on the allocation flag
5345 slbmte r5,r4 ; Make that SLB entry
5347 std r6,validSegs(r12) ; Mark as valid
5348 mtmsrd r11 ; Restore the MSR
5355 mfsprg r12,1 ; Get the current activation
5356 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5357 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5358 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5359 lis r0,0x8000 ; Set bit 0
5360 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5361 srw r0,r0,r2 ; Get bit corresponding to SR
5362 addi r7,r12,validSegs ; Point to the valid segment flags directly
5364 mtsrin r8,r4 ; Set the actual SR
5365 isync ; Need to make sure this is done
5367 hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5368 or r6,r6,r0 ; Show that SR is valid
5369 stwcx. r6,0,r7 ; Set the valid SR flags
5370 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5376 ; This routine invalidates a segment register.
5380 .globl EXT(hw_blow_seg)
5384 mfsprg r10,2 ; Get feature flags
5385 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5387 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5389 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5391 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5392 mfmsr r6 ; Get current MSR
5393 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5394 mtmsrd r0,1 ; Set only the EE bit to 0
5395 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5396 mfmsr r11 ; Get the MSR right now, after disabling EE
5397 andc r2,r11,r2 ; Turn off translation now
5398 rldimi r2,r0,63,0 ; Turn the 64-bit mode (SF) bit on
5399 or r11,r11,r6 ; Turn on the EE bit if it was on
5400 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5401 isync ; Hang out a bit
5403 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5405 slbie r9 ; Invalidate the associated SLB entry
5407 mtmsrd r11 ; Restore the MSR
5414 mfsprg r12,1 ; Get the current activation
5415 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5416 addi r7,r12,validSegs ; Point to the valid segment flags directly
5417 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5418 rlwinm r6,r9,4,28,31 ; Convert segment to number
5419 lis r2,0x8000 ; Set up a mask
5420 srw r2,r2,r6 ; Make a mask
5421 and. r0,r4,r2 ; See if this is even valid
5422 li r5,invalSpace ; Set the invalid address space VSID
5423 beqlr ; Leave if already invalid...
5425 mtsrin r5,r9 ; Slam the segment register
5426 isync ; Need to make sure this is done
5428 hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5429 stwcx. r4,0,r7 ; Set the valid SR flags
5430 beqlr++ ; Stored ok, no interrupt, time to leave...
5432 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5433 b hbsrupt ; Try again...
5436 ; This routine invalidates the entire pmap segment cache
5438 ; Translation is on, interrupts may or may not be enabled.
5442 .globl EXT(invalidateSegs)
5444 LEXT(invalidateSegs)
5446 la r10,pmapCCtl(r3) ; Point to the segment cache control
5447 eqv r2,r2,r2 ; Get all foxes
5449 isInv: lwarx r4,0,r10 ; Get the segment cache control value
5450 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5451 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5452 bne-- isInv0 ; Yes, try again...
5454 stwcx. r4,0,r10 ; Try to invalidate it
5455 bne-- isInv ; Someone else just stuffed it...
5459 isInv0: li r4,lgKillResv ; Get reservation kill zone
5460 stwcx. r4,0,r4 ; Kill reservation
5462 isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5463 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5464 bne-- isInv1 ; Yes, still locked, keep waiting...
5465 b isInv ; Not locked now, go try to grab it again...
5468 ; This routine switches segment registers between kernel and user.
5469 ; We have some assumptions and rules:
5470 ; We are in the exception vectors
5471 ; pf64Bitb is set up
5472 ; R3 contains the MSR we are going to
5473 ; We cannot use R4, R13, R20, R21, R25, R26, R29
5474 ; R13 is the savearea
5475 ; R29 has the per_proc
5477 ; We return R3 as 0 if we did not switch between kernel and user
5478 ; We also maintain and apply the user state key modifier used by VMM support;
5479 ; If we go to the kernel it is set to 0, otherwise it follows the userProtKeybit in spcFlags.
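;
; The branch-free pmap/key selection below amounts to (a C sketch, illustrative only;
; helper names are assumptions):
;
;	user = msr_pr_bit(msr) | msr_ri_bit(msr);		/* 1 if we will be using user segments */
;	mask = 0 - user;					/* 0x00000000 kernel, 0xFFFFFFFF user */
;	p    = ((uintptr_t)&kernel_pmap_phys & ~mask)
;	     | ((uintptr_t)&per_proc->ppUserPmap & mask);	/* pointer to the pmap address to use */
;	key  = user_prot_key & mask;				/* key modifier forced to 0 for kernel */
;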
5484 .globl EXT(switchSegs)
5488 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5489 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5490 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5491 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5492 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5493 or r2,r2,r3 ; This will be 1 if we will be using user segments
5494 li r3,0 ; Get a selection mask
5495 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5496 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5497 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5498 la r19,ppUserPmap(r29) ; Point to the current user pmap
5500 ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5501 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5503 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5504 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5505 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5506 or r8,r8,r19 ; Get the pointer to the pmap we are using
5508 beqlr ; We are staying in the same mode, do not touch segs...
5510 lwz r28,0(r8) ; Get top half of pmap address
5511 lwz r10,4(r8) ; Get bottom half
5513 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5514 rlwinm r28,r28,0,1,0 ; Copy top to top
5515 stw r30,ppMapFlags(r29) ; Set the key modifier
5516 rlwimi r28,r10,0,0,31 ; Insert bottom
5518 la r10,pmapCCtl(r28) ; Point to the segment cache control
5519 la r9,pmapSegCache(r28) ; Point to the segment cache
5521 ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5522 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5523 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5524 bne-- ssgLock0 ; Yup, this is in use...
5526 stwcx. r16,0,r10 ; Try to set the lock
5527 bne-- ssgLock ; Did we get contention?
5529 not r11,r15 ; Invert the invalids to valids
5530 li r17,0 ; Set a mask for the SRs we are loading
5531 isync ; Make sure we are all caught up
5533 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5536 slbia ; Trash all SLB entries (except for entry 0 that is)
5537 li r17,1 ; Get SLB index to load (skip slb 0)
5538 oris r0,r0,0x8000 ; Get set for a mask
5539 b ssg64Enter ; Start on a cache line...
5543 ssgLock0: li r15,lgKillResv ; Killing field
5544 stwcx. r15,0,r15 ; Kill reservation
5546 ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5547 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5548 beq++ ssgLock ; Not locked now, go try to grab it...
5549 b ssgLock1 ; Still locked, keep waiting...
5551 ; This is the 32-bit address space switch code.
5552 ; We take a reservation on the segment cache and walk through.
5553 ; For each entry, we load the specified entries and remember which
5554 ; we did with a mask. Then, we figure out which segments should be
5555 ; invalid and then see which actually are. Then we load those with the
5556 ; defined invalid VSID.
5557 ; Afterwards, we unlock the segment cache.
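;
; A C-flavored sketch of the loop below (illustrative only; helper names are assumptions):
;
;	loaded = 0;
;	while ((i = count_leading_zeros32(valid)) < pmapSegCacheUse) {
;		valid &= ~(0x80000000 >> i);			/* consume this cache entry */
;		sr = make_sr(cache[i].sgcVSID ^ key_modifier);	/* apply the user-state key modifier */
;		mtsrin(sr, cache[i].sgcESID);			/* load the segment register */
;		loaded |= 0x80000000 >> seg_number(cache[i].sgcESID);
;	}
;	stale = validSegs & ~loaded;				/* valid before but not loaded now */
;	for (each SR bit set in stale)
;		mtsrin(invalSpace, seg);
;	validSegs = loaded;
;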
5562 ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5563 cmplwi r12,pmapSegCacheUse ; See if we are done
5564 slwi r14,r12,4 ; Index to the cache slot
5565 lis r0,0x8000 ; Get set for a mask
5566 add r14,r14,r9 ; Point to the entry
5568 bge- ssg32Done ; All done...
5570 lwz r5,sgcESID+4(r14) ; Get the ESID part
5571 srw r2,r0,r12 ; Form a mask for the one we are loading
5572 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5574 andc r11,r11,r2 ; Clear the bit
5575 lwz r6,sgcVSID(r14) ; And get the VSID top
5577 rlwinm r2,r5,4,28,31 ; Convert segment to number
5579 xor r7,r7,r30 ; Modify the key before we actually set it
5580 srw r0,r0,r2 ; Get a mask for the SR we are loading
5581 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5582 or r17,r17,r0 ; Remember the segment
5583 rlwimi r8,r7,20,12,31 ; Insert bits 4:23 of the VSID
5584 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5586 mtsrin r8,r5 ; Load the segment
5587 b ssg32Enter ; Go enter the next...
5591 ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5592 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5594 lis r0,0x8000 ; Get set for a mask
5595 li r2,invalSpace ; Set the invalid address space VSID
5599 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5602 ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5603 cmplwi r18,16 ; Have we finished?
5604 srw r22,r0,r18 ; Get the mask bit
5605 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5606 andc r16,r16,r22 ; Get rid of the guy we just did
5607 bge ssg32Really ; Yes, we are really done now...
5609 mtsrin r2,r23 ; Invalidate the SR
5610 b ssg32Inval ; Do the next...
5615 stw r17,validSegs(r29) ; Set the valid SR flags
5616 li r3,1 ; Set kernel/user transition
5620 ; This is the 64-bit address space switch code.
5621 ; First we blow away all of the SLB entries.
5623 ; Then we walk the segment cache, loading the SLB as we go. Afterwards, we release the cache lock
5625 ; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5626 ; It's a performance thing...
5631 ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5632 cmplwi r12,pmapSegCacheUse ; See if we are done
5633 slwi r14,r12,4 ; Index to the cache slot
5634 srw r16,r0,r12 ; Form a mask for the one we are loading
5635 add r14,r14,r9 ; Point to the entry
5636 andc r11,r11,r16 ; Clear the bit
5637 bge-- ssg64Done ; All done...
5639 ld r5,sgcESID(r14) ; Get the ESID part
5640 ld r6,sgcVSID(r14) ; And get the VSID part
5641 oris r5,r5,0x0800 ; Turn on the valid bit
5642 or r5,r5,r17 ; Insert the SLB slot
5643 xor r6,r6,r30 ; Modify the key before we actually set it
5644 addi r17,r17,1 ; Bump to the next slot
5645 slbmte r6,r5 ; Make that SLB entry
5646 b ssg64Enter ; Go enter the next...
5650 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5652 eqv r16,r16,r16 ; Load up with all foxes
5653 subfic r17,r17,64 ; Get the number of 1 bits we need
5655 sld r16,r16,r17 ; Get a mask for the used SLB entries
5656 li r3,1 ; Set kernel/user transition
5657 std r16,validSegs(r29) ; Set the valid SR flags
5661 ; mapSetUp - this function sets initial state for all mapping functions.
5662 ; We turn off all translations (physical), disable interruptions, and
5663 ; enter 64-bit mode if applicable.
5665 ; We also return the original MSR in r11, the feature flags in R12,
5666 ; and CR6 set up so we can do easy branches for 64-bit
5667 ; hw_clear_maps assumes r10, r9 will not be trashed.
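;
; The MSR juggling amounts to (a sketch, illustrative only):
;
;	old = mfmsr();					/* handed back to the caller in r11 */
;	new = old & ~(MSR_VEC | MSR_FP | MSR_EE | MSR_DR | MSR_IR);
;	if (is_64bit_machine()) new |= MSR_SF;		/* enter 64-bit mode */
;	mtmsr(new);					/* via the loadMSR SC on pfNoMSRirb machines */
;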
5671 .globl EXT(mapSetUp)
5675 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5676 mfsprg r12,2 ; Get feature flags
5677 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5678 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5679 mfmsr r11 ; Save the MSR
5680 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5681 andc r11,r11,r0 ; Clear VEC and FP for good
5682 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5683 li r2,1 ; Prepare for 64 bit
5684 andc r0,r11,r0 ; Clear the rest
5685 bt pfNoMSRirb,msuNoMSR ; No MSR...
5686 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5688 mtmsr r0 ; Translation and all off
5689 isync ; Toss prefetch
5694 msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5695 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5701 msuNoMSR: mr r2,r3 ; Save R3 across call
5702 mr r3,r0 ; Get the new MSR value
5703 li r0,loadMSR ; Get the MSR setter SC
5705 mr r3,r2 ; Restore R3
5706 blr ; Go back all set up...
5710 ; Guest shadow assist -- remove all guest mappings
5712 ; Remove all mappings for a guest pmap from the shadow hash table.
5715 ; r3 : address of pmap, 32-bit kernel virtual address
5717 ; Non-volatile register usage:
5718 ; r24 : host pmap's physical address
5719 ; r25 : VMM extension block's physical address
5720 ; r26 : physent address
5721 ; r27 : guest pmap's space ID number
5722 ; r28 : current hash table page index
5723 ; r29 : guest pmap's physical address
5724 ; r30 : saved msr image
5725 ; r31 : current mapping
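;
; In outline (a C sketch, illustrative only; the flags and counters are the mpg*/vxs*
; fields referenced below, the helpers are assumptions):
;
;	sxlk_exclusive(&host_pmap->pmapSXlk);
;	for (page = 0; page < GV_HPAGES; page++)
;		for (each mapping slot in the hash page)
;			if (!(slot->mpFlags & mpgFree) && slot->mpSpace == guest_space) {
;				if (!(slot->mpFlags & mpgDormant))
;					invalidate_pte(slot);		/* mapInvPte32/64 */
;				unchain_from_physent(slot);		/* via mapFindLockPN */
;				slot->mpFlags = (slot->mpFlags & ~mpgFlags) | mpgFree;
;			}
;	sxlk_unlock(&host_pmap->pmapSXlk);
;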
5728 .globl EXT(hw_rem_all_gv)
5732 #define graStackSize ((31-24+1)*4)+4
5733 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5734 ; Mint a new stack frame
5735 mflr r0 ; Get caller's return address
5736 mfsprg r11,2 ; Get feature flags
5737 mtcrf 0x02,r11 ; Insert feature flags into cr6
5738 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5739 ; Save caller's return address
5740 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5741 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5742 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5743 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5744 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5745 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5746 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5747 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5749 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5751 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5752 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5753 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5754 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5755 b graStart ; Get to it
5756 gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5757 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5758 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5759 graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5760 xor r29,r3,r9 ; Convert pmap_t virt->real
5761 mr r30,r11 ; Save caller's msr image
5763 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5764 bl sxlkExclusive ; Get lock exclusive
5766 lwz r3,vxsGra(r25) ; Get remove all count
5767 addi r3,r3,1 ; Increment remove all count
5768 stw r3,vxsGra(r25) ; Update remove all count
5770 li r28,0 ; r28 <- first hash page table index to search
5771 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5773 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5774 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5775 ; Convert page index into page physical index offset
5776 add r31,r31,r11 ; Calculate page physical index entry address
5777 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5778 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5779 b graLoop ; Examine all slots in this page
5780 gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5781 b graLoop ; Examine all slots in this page
5784 graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5785 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5786 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5787 xor r4,r4,r27 ; Compare space ID number
5788 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5789 bne graMiss ; Not one of ours, skip it
5791 lwz r11,vxsGraHits(r25) ; Get remove hit count
5792 addi r11,r11,1 ; Increment remove hit count
5793 stw r11,vxsGraHits(r25) ; Update remove hit count
5795 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5796 bne graRemPhys ; Yes, nothing to disconnect
5798 lwz r11,vxsGraActive(r25) ; Get remove active count
5799 addi r11,r11,1 ; Increment remove active count
5800 stw r11,vxsGraActive(r25) ; Update remove active count
5802 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5803 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5804 ; r31 <- mapping's physical address
5805 ; r3 -> PTE slot physical address
5806 ; r4 -> High-order 32 bits of PTE
5807 ; r5 -> Low-order 32 bits of PTE
5809 ; r7 -> PCA physical address
5810 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5811 b graFreePTE ; Join 64-bit path to release the PTE
5812 graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5813 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5814 graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5815 beq- graRemPhys ; No valid PTE, we're almost done
5816 lis r0,0x8000 ; Prepare free bit for this slot
5817 srw r0,r0,r2 ; Position free bit
5818 or r6,r6,r0 ; Set it in our PCA image
5819 lwz r8,mpPte(r31) ; Get PTE pointer
5820 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5821 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5822 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5823 stw r6,0(r7) ; Update PCA and unlock the PTEG
5826 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5827 bl mapFindLockPN ; Find 'n' lock this page's physent
5828 mr. r26,r3 ; Got lock on our physent?
5829 beq-- graBadPLock ; No, time to bail out
5831 crset cr1_eq ; cr1_eq <- previous link is the anchor
5832 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5833 la r11,ppLink+4(r26) ; Point to chain anchor
5834 lwz r9,ppLink+4(r26) ; Get chain anchor
5835 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5837 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5838 cmplw r9,r31 ; Is this the mapping to remove?
5839 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5840 bne graRemNext ; No, chain onward
5841 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5842 stw r8,0(r11) ; Unchain gpv->phys mapping
5843 b graRemoved ; Exit loop
5845 lwarx r0,0,r11 ; Get previous link
5846 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5847 stwcx. r0,0,r11 ; Update previous link
5848 bne- graRemRetry ; Lost reservation, retry
5849 b graRemoved ; Good work, let's get outta here
5851 graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5852 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5853 mr. r9,r8 ; Does next entry exist?
5854 b graRemLoop ; Carry on
5857 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5858 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5859 la r11,ppLink(r26) ; Point to chain anchor
5860 ld r9,ppLink(r26) ; Get chain anchor
5861 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5862 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5863 cmpld r9,r31 ; Is this the mapping to remove?
5864 ld r8,mpAlias(r9) ; Get forward chain pointer
5865 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5866 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5867 std r8,0(r11) ; Unchain gpv->phys mapping
5868 b graRemoved ; Exit loop
5869 graRem64Rt: ldarx r0,0,r11 ; Get previous link
5870 and r0,r0,r7 ; Get flags
5871 or r0,r0,r8 ; Insert new forward pointer
5872 stdcx. r0,0,r11 ; Slam it back in
5873 bne-- graRem64Rt ; Lost reservation, retry
5874 b graRemoved ; Good work, let's go home
5877 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5878 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5879 mr. r9,r8 ; Does next entry exist?
5880 b graRem64Lp ; Carry on
5883 mr r3,r26 ; r3 <- physent's address
5884 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5886 lwz r3,mpFlags(r31) ; Get mapping's flags
5887 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5888 ori r3,r3,mpgFree ; Mark mapping free
5889 stw r3,mpFlags(r31) ; Update flags
5891 graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5892 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5893 bne graLoop ; No, examine next slot
5894 addi r28,r28,1 ; Increment hash table page index
5895 cmplwi r28,GV_HPAGES ; End of hash table?
5896 bne graPgLoop ; Examine next hash table page
5898 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5899 bl sxlkUnlock ; Release host pmap's search lock
5901 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5902 mtmsr r30 ; Restore 'rupts, translation
5903 isync ; Throw a small wrench into the pipeline
5904 b graPopFrame ; Nothing to do now but pop a frame and return
5905 graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5907 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5908 ; Get caller's return address
5909 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5910 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5911 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5912 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5913 mtlr r0 ; Prepare return address
5914 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5915 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5916 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5917 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5918 lwz r1,0(r1) ; Pop stack frame
5919 blr ; Return to caller
5923 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5924 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5925 li r3,failMapping ; The BOMB, Dmitri.
5926 sc ; The hydrogen bomb.
5930 ; Guest shadow assist -- remove local guest mappings
5932 ; Remove local mappings for a guest pmap from the shadow hash table.
5935 ; r3 : address of guest pmap, 32-bit kernel virtual address
5937 ; Non-volatile register usage:
5938 ; r20 : current active map word's physical address
5939 ; r21 : current hash table page address
5940 ; r22 : updated active map word in process
5941 ; r23 : active map word in process
5942 ; r24 : host pmap's physical address
5943 ; r25 : VMM extension block's physical address
5944 ; r26 : physent address
5945 ; r27 : guest pmap's space ID number
5946 ; r28 : current active map index
5947 ; r29 : guest pmap's physical address
5948 ; r30 : saved msr image
5949 ; r31 : current mapping
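;
; In outline (a C sketch, illustrative only): rather than visiting every hash slot, the
; active-map bit array selects the slots to visit, and matching non-global mappings are
; marked dormant instead of free (helper names are assumptions):
;
;	for (w = 0; w < GV_MAP_WORDS; w++)
;		for (each bit lit in active_map[w]) {			/* cntlzw-driven */
;			slot = slot_for(w, bit);
;			if (slot->mpSpace != guest_space || (slot->mpFlags & mpgGlobal))
;				continue;				/* not one of ours */
;			slot->mpFlags |= mpgDormant;
;			invalidate_pte(slot);				/* mapInvPte32/64 */
;			active_map[w] &= ~bit;
;		}
;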
5952 .globl EXT(hw_rem_local_gv)
5954 LEXT(hw_rem_local_gv)
5956 #define grlStackSize ((31-20+1)*4)+4
5957 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5958 ; Mint a new stack frame
5959 mflr r0 ; Get caller's return address
5960 mfsprg r11,2 ; Get feature flags
5961 mtcrf 0x02,r11 ; Insert feature flags into cr6
5962 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5963 ; Save caller's return address
5964 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5965 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5966 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5967 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5968 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5969 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5970 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5971 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5972 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5973 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5974 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5975 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5977 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5979 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5980 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5981 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5982 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5983 b grlStart ; Get to it
5984 grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5985 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5986 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5988 grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5989 xor r29,r3,r9 ; Convert pmap_t virt->real
5990 mr r30,r11 ; Save caller's msr image
5992 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5993 bl sxlkExclusive ; Get lock exclusive
5995 li r28,0 ; r28 <- index of first active map word to search
5996 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5997 b grlMap1st ; Examine first map word
6000 grlNextMap: stw r22,0(r20) ; Save updated map word back to the active map array
6001 addi r28,r28,1 ; Increment map word index
6002 cmplwi r28,GV_MAP_WORDS ; See if we're done
6003 beq grlDone ; Yup, let's get outta here
6005 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
6006 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
6007 ; Convert map index into map index offset
6008 add r20,r20,r11 ; Calculate map array element address
6009 lwz r22,0(r20) ; Get active map word at index
6010 mr. r23,r22 ; Any active mappings indicated?
6011 beq grlNextMap ; Nope, check next word
6013 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6014 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6015 ; Extract page index from map word index and convert
6016 ; into page physical index offset
6017 add r21,r21,r11 ; Calculate page physical index entry address
6018 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6019 lwz r21,4(r21) ; Get selected hash table page's address
6020 b grlLoop ; Examine all slots in this page
6021 grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6022 b grlLoop ; Examine all slots in this page
6025 grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6026 cmplwi r11,32 ; Any active mappings left in this word?
6027 lis r12,0x8000 ; Prepare mask to reset bit
6028 srw r12,r12,r11 ; Position mask bit
6029 andc r23,r23,r12 ; Reset lit bit
6030 beq grlNextMap ; No bits lit, examine next map word
6032 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6033 rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6034 ; Extract slot band number from index and insert
6035 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6037 lwz r3,mpFlags(r31) ; Get mapping's flags
6038 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6039 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6040 xor r4,r4,r27 ; Compare space ID number
6041 or. r4,r4,r5 ; (space id miss || global)
6042 bne grlLoop ; Not one of ours, skip it
6043 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6044 ori r3,r3,mpgDormant ; Mark entry dormant
6045 stw r3,mpFlags(r31) ; Update mapping's flags
6047 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6048 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6049 ; r31 <- mapping's physical address
6050 ; r3 -> PTE slot physical address
6051 ; r4 -> High-order 32 bits of PTE
6052 ; r5 -> Low-order 32 bits of PTE
6054 ; r7 -> PCA physical address
6055 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6056 b grlFreePTE ; Join 64-bit path to release the PTE
6057 grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6058 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6059 grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6060 beq- grlLoop ; No valid PTE, we're done with this mapping
6061 lis r0,0x8000 ; Prepare free bit for this slot
6062 srw r0,r0,r2 ; Position free bit
6063 or r6,r6,r0 ; Set it in our PCA image
6064 lwz r8,mpPte(r31) ; Get PTE pointer
6065 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6066 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6067 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6068 stw r6,0(r7) ; Update PCA and unlock the PTEG
6069 b grlLoop ; On to next active mapping in this map word
6071 grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6072 bl sxlkUnlock ; Release host pmap's search lock
6074 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6075 mtmsr r30 ; Restore 'rupts, translation
6076 isync ; Throw a small wrench into the pipeline
6077 b grlPopFrame ; Nothing to do now but pop a frame and return
6078 grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6080 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6081 ; Get caller's return address
6082 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6083 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6084 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6085 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6086 mtlr r0 ; Prepare return address
6087 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6088 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6089 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6090 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6091 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6092 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6093 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6094 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6095 lwz r1,0(r1) ; Pop stack frame
6096 blr ; Return to caller
6100 ; Guest shadow assist -- resume a guest mapping
6102 ; Locates the specified dormant mapping, and if it exists validates it and makes it
6106 ; r3 : address of host pmap, 32-bit kernel virtual address
6107 ; r4 : address of guest pmap, 32-bit kernel virtual address
6108 ; r5 : host virtual address, high-order 32 bits
6109 ; r6 : host virtual address, low-order 32 bits
6110 ; r7 : guest virtual address, high-order 32 bits
6111 ; r8 : guest virtual address, low-order 32 bits
6112 ; r9 : guest mapping protection code
6114 ; Non-volatile register usage:
6115 ; r23 : VMM extension block's physical address
6116 ; r24 : physent physical address
6117 ; r25 : caller's msr image from mapSetUp
6118 ; r26 : guest mapping protection code
6119 ; r27 : host pmap physical address
6120 ; r28 : guest pmap physical address
6121 ; r29 : host virtual address
6122 ; r30 : guest virtual address
6123 ; r31 : gva->phys mapping's physical address
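;
; In outline (a C sketch, illustrative only; see grsSrchMiss/grsPEMiss below for the
; failure paths, and the helper names here are assumptions):
;
;	slot = shadow_hash_lookup(guest_pmap, guest_va);	/* may be dormant */
;	if (slot == NULL) goto miss;				/* grsSrchMiss */
;	if (!(slot->mpFlags & mpgDormant))
;		invalidate_pte(slot);				/* mapInvPte32/64 */
;	if (!physent_has_host_mapping(slot->mpPAddr, host_space, host_va))
;		goto miss;					/* grsPEMiss: drop it, caller redoes */
;	set_protection(slot, prot);				/* mpPP bits */
;	slot->mpFlags &= ~mpgDormant;				/* mapping is live again */
;	return mapRtOK;
;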
6126 .globl EXT(hw_res_map_gv)
6130 #define grsStackSize ((31-23+1)*4)+4
6132 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6133 ; Mint a new stack frame
6134 mflr r0 ; Get caller's return address
6135 mfsprg r11,2 ; Get feature flags
6136 mtcrf 0x02,r11 ; Insert feature flags into cr6
6137 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6138 ; Save caller's return address
6139 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6140 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6141 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6142 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6143 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6144 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6145 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6146 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6147 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6149 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6150 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6151 mr r26,r9 ; Copy guest mapping protection code
6153 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6154 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6155 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6156 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6157 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6158 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6159 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6160 srwi r11,r30,12 ; Form shadow hash:
6161 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6162 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6163 ; Form index offset from hash page number
6164 add r31,r31,r10 ; r31 <- hash page index entry
6165 lwz r31,4(r31) ; r31 <- hash page paddr
6166 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6167 ; r31 <- hash group paddr
6168 b grsStart ; Get to it
6170 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6171 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6172 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6173 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6174 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6175 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6176 srwi r11,r30,12 ; Form shadow hash:
6177 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6178 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6179 ; Form index offset from hash page number
6180 add r31,r31,r10 ; r31 <- hash page index entry
6181 ld r31,0(r31) ; r31 <- hash page paddr
6182 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6183 ; r31 <- hash group paddr
6185 grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6186 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6187 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6188 mr r25,r11 ; Save caller's msr image
6190 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6191 bl sxlkExclusive ; Get lock exclusive
6193 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6194 mtctr r0 ; in this group
6195 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6197 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6198 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6199 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6200 b grs32SrchLp ; Let the search begin!
6204 mr r6,r3 ; r6 <- current mapping slot's flags
6205 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6206 mr r7,r4 ; r7 <- current mapping slot's space ID
6207 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6208 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6209 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6210 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6211 xor r7,r7,r9 ; Compare space ID
6212 or r0,r11,r7 ; r0 <- !(!free && space match)
6213 xor r8,r8,r30 ; Compare virtual address
6214 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6215 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6217 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6218 bdnz grs32SrchLp ; Iterate
6220 mr r6,r3 ; r6 <- current mapping slot's flags
6221 clrrwi r5,r5,12 ; Remove flags from virtual address
6222 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6223 xor r4,r4,r9 ; Compare space ID
6224 or r0,r11,r4 ; r0 <- !(!free && space match)
6225 xor r5,r5,r30 ; Compare virtual address
6226 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6227 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6228 b grsSrchMiss ; No joy in our hash group
6231 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6232 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6233 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6234 b grs64SrchLp ; Let the search begin!
6238 mr r6,r3 ; r6 <- current mapping slot's flags
6239 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6240 mr r7,r4 ; r7 <- current mapping slot's space ID
6241 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6242 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6243 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6244 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6245 xor r7,r7,r9 ; Compare space ID
6246 or r0,r11,r7 ; r0 <- !(!free && space match)
6247 xor r8,r8,r30 ; Compare virtual address
6248 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6249 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6251 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6252 bdnz grs64SrchLp ; Iterate
6254 mr r6,r3 ; r6 <- current mapping slot's flags
6255 clrrdi r5,r5,12 ; Remove flags from virtual address
6256 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6257 xor r4,r4,r9 ; Compare space ID
6258 or r0,r11,r4 ; r0 <- !(!free && space match)
6259 xor r5,r5,r30 ; Compare virtual address
6260 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6261 bne grsSrchMiss ; No joy in our hash group
6264 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6265 bne grsFindHost ; Yes, nothing to disconnect
6267 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6268 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6269 ; r31 <- mapping's physical address
6270 ; r3 -> PTE slot physical address
6271 ; r4 -> High-order 32 bits of PTE
6272 ; r5 -> Low-order 32 bits of PTE
6274 ; r7 -> PCA physical address
6275 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6276 b grsFreePTE ; Join 64-bit path to release the PTE
6277 grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6278 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6279 grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6280 beq- grsFindHost ; No valid PTE, we're almost done
6281 lis r0,0x8000 ; Prepare free bit for this slot
6282 srw r0,r0,r2 ; Position free bit
6283 or r6,r6,r0 ; Set it in our PCA image
6284 lwz r8,mpPte(r31) ; Get PTE pointer
6285 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6286 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6287 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6288 stw r6,0(r7) ; Update PCA and unlock the PTEG
6292 // We now have a dormant guest mapping that matches our space id and virtual address. Our next
6293 // step is to locate the host mapping that completes the guest mapping's connection to a physical
6294 // frame. The guest and host mappings must connect to the same physical frame, so they must both
6295 // be chained on the same physent. We search the physent chain for a host mapping matching our
6296 // host's space id and the host virtual address. If we succeed, we know that the entire chain
6297 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6298 // resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6299 // host virtual or physical address has changed since the guest mapping was suspended, so it
6300 // is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
6301 // our caller that it will have to take its long path, translating the host virtual address
6302 // through the host's skiplist and installing a new guest mapping.
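//
// Illustrative sketch (not the kernel's C code; field and helper names are hypothetical) of the
// resume decision described above:
//
//	physent = mapFindLockPN(guest_mp->mpPAddr);			// lock the physical page's physent
//	for (mp = physent->first_alias; mp != NULL; mp = mp->next_alias) {
//		if (mp->type == mpNormal && mp->space == host_space_id &&
//		    (mp->vaddr & ~mpHWFlags) == host_vaddr)
//			break;						// hva->phys link is still intact
//	}
//	if (mp != NULL)	guest_mp->flags &= ~mpgDormant;			// resume the dormant guest mapping
//	else		guest_mp->flags |= mpgFree;			// stale: delete it, caller must redrive
//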
6304 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6305 bl mapFindLockPN ; Find 'n' lock this page's physent
6306 mr. r24,r3 ; Got lock on our physent?
6307 beq-- grsBadPLock ; No, time to bail out
6309 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6311 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6312 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6313 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6314 grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6315 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6316 lwz r7,mpFlags(r12) ; Get mapping's flags
6317 lhz r4,mpSpace(r12) ; Get mapping's space id number
6318 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6319 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6321 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6322 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6323 xori r0,r0,mpNormal ; Normal mapping?
6324 xor r4,r4,r6 ; Compare w/ host space id number
6325 xor r5,r5,r29 ; Compare w/ host virtual address
6326 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6327 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6329 b grsPELoop ; Iterate
6331 grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6332 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6333 ld r9,ppLink(r24) ; Get first mapping on physent
6334 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6335 andc r9,r9,r0 ; Cleanup mapping pointer
6336 grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6337 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6338 lwz r7,mpFlags(r12) ; Get mapping's flags
6339 lhz r4,mpSpace(r12) ; Get mapping's space id number
6340 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6341 ld r9,mpAlias(r12) ; Next mapping physent alias chain
6342 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6343 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6344 xori r0,r0,mpNormal ; Normal mapping?
6345 xor r4,r4,r6 ; Compare w/ host space id number
6346 xor r5,r5,r29 ; Compare w/ host virtual address
6347 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6348 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6350 b grsPELp64 ; Iterate
6352 grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6353 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6354 stw r0,mpVAddr+4(r31) ; Write 'em back
6356 eieio ; Ensure previous mapping updates are visible
6357 lwz r0,mpFlags(r31) ; Get flags
6358 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6359 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6361 li r31,mapRtOK ; Indicate success
6362 b grsRelPhy ; Exit through physent lock release
6364 grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6365 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6366 la r11,ppLink+4(r24) ; Point to chain anchor
6367 lwz r9,ppLink+4(r24) ; Get chain anchor
6368 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6369 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6370 cmplw r9,r31 ; Is this the mapping to remove?
6371 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6372 bne grsRemNext ; No, chain onward
6373 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6374 stw r8,0(r11) ; Unchain gpv->phys mapping
6375 b grsDelete ; Finish deleting mapping
6377 lwarx r0,0,r11 ; Get previous link
6378 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6379 stwcx. r0,0,r11 ; Update previous link
6380 bne- grsRemRetry ; Lost reservation, retry
6381 b grsDelete ; Finish deleting mapping
6384 grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6385 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6386 mr. r9,r8 ; Does next entry exist?
6387 b grsRemLoop ; Carry on
6390 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6391 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6392 la r11,ppLink(r24) ; Point to chain anchor
6393 ld r9,ppLink(r24) ; Get chain anchor
6394 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6395 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6396 cmpld r9,r31 ; Is this the mapping to remove?
6397 ld r8,mpAlias(r9) ; Get forward chain pointer
6398 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6399 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6400 std r8,0(r11) ; Unchain gpv->phys mapping
6401 b grsDelete ; Finish deleting mapping
6402 grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6403 and r0,r0,r7 ; Get flags
6404 or r0,r0,r8 ; Insert new forward pointer
6405 stdcx. r0,0,r11 ; Slam it back in
6406 bne-- grsRem64Rt ; Lost reservation, retry
6407 b grsDelete ; Finish deleting mapping
6411 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6412 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6413 mr. r9,r8 ; Does next entry exist?
6414 b grsRem64Lp ; Carry on
6417 lwz r3,mpFlags(r31) ; Get mapping's flags
6418 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6419 ori r3,r3,mpgFree ; Mark mapping free
6420 stw r3,mpFlags(r31) ; Update flags
6422 li r31,mapRtNotFnd ; Didn't succeed
6424 grsRelPhy: mr r3,r24 ; r3 <- physent addr
6425 bl mapPhysUnlock ; Unlock physent chain
6427 grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6428 bl sxlkUnlock ; Release host pmap search lock
6430 grsRtn: mr r3,r31 ; r3 <- result code
6431 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6432 mtmsr r25 ; Restore 'rupts, translation
6433 isync ; Throw a small wrench into the pipeline
6434 b grsPopFrame ; Nothing to do now but pop a frame and return
6435 grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6437 lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6438 ; Get caller's return address
6439 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6440 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6441 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6442 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6443 mtlr r0 ; Prepare return address
6444 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6445 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6446 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6447 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6448 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6449 lwz r1,0(r1) ; Pop stack frame
6450 blr ; Return to caller
6454 li r31,mapRtNotFnd ; Could not locate requested mapping
6455 b grsRelPmap ; Exit through host pmap search lock release
6459 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6460 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6461 li r3,failMapping ; The BOMB, Dmitri.
6462 sc ; The hydrogen bomb.
6466 ; Guest shadow assist -- add a guest mapping
6468 ; Adds a guest mapping.
6471 ; r3 : address of host pmap, 32-bit kernel virtual address
6472 ; r4 : address of guest pmap, 32-bit kernel virtual address
6473 ; r5 : guest virtual address, high-order 32 bits
6474 ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6475 ; r7 : new mapping's flags
6476 ; r8 : physical address, 32-bit page number
6478 ; Non-volatile register usage:
6479 ; r22 : hash group's physical address
6480 ; r23 : VMM extension block's physical address
6481 ; r24 : mapping's flags
6482 ; r25 : caller's msr image from mapSetUp
6483 ; r26 : physent physical address
6484 ; r27 : host pmap physical address
6485 ; r28 : guest pmap physical address
6486 ; r29 : physical address, 32-bit 4k-page number
6487 ; r30 : guest virtual address
6488 ; r31 : gva->phys mapping's physical address
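;
;		Illustrative sketch (hypothetical C, not the kernel's declarations) of how the
;		shadow hash group searched below is located from the guest space ID and vaddr:
;
;			hash  = guest_space_id ^ (gva >> 12);
;			page  = hash_page_index[hash_page_number(hash)];
;			group = page + hash_group_offset(hash);
;
;		hash_page_number() and hash_group_offset() stand in for the GV_HPAGE_* and
;		GV_HGRP_* bit-field extractions performed by the rlwinm/rlwimi/insrdi below.
;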
6492 .globl EXT(hw_add_map_gv)
6497 #define gadStackSize ((31-22+1)*4)+4
6499 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6500 ; Mint a new stack frame
6501 mflr r0 ; Get caller's return address
6502 mfsprg r11,2 ; Get feature flags
6503 mtcrf 0x02,r11 ; Insert feature flags into cr6
6504 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6505 ; Save caller's return address
6506 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6507 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6508 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6509 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6510 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6511 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6512 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6513 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6514 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6515 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6517 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6518 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6519 mr r24,r7 ; Copy guest mapping's flags
6520 mr r29,r8 ; Copy target frame's physical address
6522 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6523 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6524 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6525 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6526 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6527 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6528 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6529 srwi r11,r30,12 ; Form shadow hash:
6530 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6531 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6532 ; Form index offset from hash page number
6533 add r22,r22,r10 ; r22 <- hash page index entry
6534 lwz r22,4(r22) ; r22 <- hash page paddr
6535 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6536 ; r22 <- hash group paddr
6537 b gadStart ; Get to it
6539 gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6540 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6541 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6542 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6543 srwi r11,r30,12 ; Form shadow hash:
6544 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6545 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6546 ; Form index offset from hash page number
6547 add r22,r22,r10 ; r22 <- hash page index entry
6548 ld r22,0(r22) ; r22 <- hash page paddr
6549 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6550 ; r22 <- hash group paddr
6552 gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6553 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6554 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6555 mr r25,r11 ; Save caller's msr image
6557 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6558 bl sxlkExclusive ; Get lock exclusive
6560 mr r31,r22 ; Prepare to search this group
6561 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6562 mtctr r0 ; in this group
6563 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6565 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6566 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6567 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6568 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6569 b gad32SrchLp ; Let the search begin!
6573 mr r6,r3 ; r6 <- current mapping slot's flags
6574 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6575 mr r7,r4 ; r7 <- current mapping slot's space ID
6576 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6577 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6578 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6579 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6580 xor r7,r7,r9 ; Compare space ID
6581 or r0,r11,r7 ; r0 <- !(!free && space match)
6582 xor r8,r8,r12 ; Compare virtual address
6583 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6584 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6586 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6587 bdnz gad32SrchLp ; Iterate
6589 mr r6,r3 ; r6 <- current mapping slot's flags
6590 clrrwi r5,r5,12 ; Remove flags from virtual address
6591 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6592 xor r4,r4,r9 ; Compare space ID
6593 or r0,r11,r4 ; r0 <- !(!free && space match)
6594 xor r5,r5,r12 ; Compare virtual address
6595 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6596 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6597 b gadScan ; No joy in our hash group
6600 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6601 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6602 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6603 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6604 b gad64SrchLp ; Let the search begin!
6608 mr r6,r3 ; r6 <- current mapping slot's flags
6609 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6610 mr r7,r4 ; r7 <- current mapping slot's space ID
6611 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6612 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6613 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6614 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6615 xor r7,r7,r9 ; Compare space ID
6616 or r0,r11,r7 ; r0 <- !(!free && space match)
6617 xor r8,r8,r12 ; Compare virtual address
6618 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6619 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6621 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6622 bdnz gad64SrchLp ; Iterate
6624 mr r6,r3 ; r6 <- current mapping slot's flags
6625 clrrdi r5,r5,12 ; Remove flags from virtual address
6626 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6627 xor r4,r4,r9 ; Compare space ID
6628 or r0,r11,r4 ; r0 <- !(!free && space match)
6629 xor r5,r5,r12 ; Compare virtual address
6630 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6631 bne gadScan ; No joy in our hash group
6632 b gadRelPmap ; Hit, let upper-level redrive sort it out
6634 gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6635 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6636 ; Prepare to address slot at cursor
6637 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6638 mtctr r0 ; in this group
6639 or r2,r22,r12 ; r2 <- 1st mapping to search
6640 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6641 li r11,0 ; No dormant entries found yet
6642 b gadScanLoop ; Let the search begin!
6646 addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6647 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6648 ; Trim off any carry, wrapping into slot number range
6649 mr r31,r2 ; r31 <- current mapping's address
6650 or r2,r22,r12 ; r2 <- next mapping to search
6651 mr r6,r3 ; r6 <- current mapping slot's flags
6652 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6653 rlwinm. r0,r6,0,mpgFree ; Test free flag
6654 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6655 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6656 xori r0,r0,mpgDormant ; Invert dormant flag
6657 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6658 bne gadNotDorm ; Not dormant or we've already seen one
6659 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6660 gadNotDorm: bdnz gadScanLoop ; Iterate
6662 mr r31,r2 ; r31 <- final mapping's address
6663 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6664 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6665 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6666 xori r0,r0,mpgDormant ; Invert dormant flag
6667 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6668 bne gadCkDormant ; Not dormant or we've already seen one
6669 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6672 mr. r31,r11 ; Get dormant mapping, if any, and test
6673 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6676 lbz r12,mpgCursor(r22) ; Get group's cursor
6677 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6678 ; Prepare to address slot at cursor
6679 or r31,r22,r12 ; r31 <- address of mapping to steal
6681 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6682 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6683 ; r31 <- mapping's physical address
6684 ; r3 -> PTE slot physical address
6685 ; r4 -> High-order 32 bits of PTE
6686 ; r5 -> Low-order 32 bits of PTE
6688 ; r7 -> PCA physical address
6689 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6690 b gadFreePTE ; Join 64-bit path to release the PTE
6691 gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6692 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6693 gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6694 beq- gadUpCursor ; No valid PTE, we're almost done
6695 lis r0,0x8000 ; Prepare free bit for this slot
6696 srw r0,r0,r2 ; Position free bit
6697 or r6,r6,r0 ; Set it in our PCA image
6698 lwz r8,mpPte(r31) ; Get PTE pointer
6699 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6700 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6701 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6702 stw r6,0(r7) ; Update PCA and unlock the PTEG
6705 rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6706 ; Recover slot number from stolen mapping's address
6707 addi r12,r12,1 ; Increment slot number
6708 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6709 stb r12,mpgCursor(r22) ; Update group's cursor
6711 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6712 bl mapFindLockPN ; Find 'n' lock this page's physent
6713 mr. r26,r3 ; Got lock on our physent?
6714 beq-- gadBadPLock ; No, time to bail out
6716 crset cr1_eq ; cr1_eq <- previous link is the anchor
6717 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6718 la r11,ppLink+4(r26) ; Point to chain anchor
6719 lwz r9,ppLink+4(r26) ; Get chain anchor
6720 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6721 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6722 cmplw r9,r31 ; Is this the mapping to remove?
6723 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6724 bne gadRemNext ; No, chain onward
6725 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6726 stw r8,0(r11) ; Unchain gpv->phys mapping
6727 b gadDelDone ; Finish deleting mapping
6729 lwarx r0,0,r11 ; Get previous link
6730 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6731 stwcx. r0,0,r11 ; Update previous link
6732 bne- gadRemRetry ; Lost reservation, retry
6733 b gadDelDone ; Finish deleting mapping
6735 gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6736 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6737 mr. r9,r8 ; Does next entry exist?
6738 b gadRemLoop ; Carry on
6741 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6742 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6743 la r11,ppLink(r26) ; Point to chain anchor
6744 ld r9,ppLink(r26) ; Get chain anchor
6745 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6746 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6747 cmpld r9,r31 ; Is this the mapping to remove?
6748 ld r8,mpAlias(r9) ; Get forward chain pointer
6749 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6750 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6751 std r8,0(r11) ; Unchain gpv->phys mapping
6752 b gadDelDone ; Finish deleting mapping
6753 gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6754 and r0,r0,r7 ; Get flags
6755 or r0,r0,r8 ; Insert new forward pointer
6756 stdcx. r0,0,r11 ; Slam it back in
6757 bne-- gadRem64Rt ; Lost reservation, retry
6758 b gadDelDone ; Finish deleting mapping
6762 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6763 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6764 mr. r9,r8 ; Does next entry exist?
6765 b gadRem64Lp ; Carry on
6768 mr r3,r26 ; Get physent address
6769 bl mapPhysUnlock ; Unlock physent chain
6772 lwz r12,pmapSpace(r28) ; Get guest space id number
6773 li r2,0 ; Get a zero
6774 stw r24,mpFlags(r31) ; Set mapping's flags
6775 sth r12,mpSpace(r31) ; Set mapping's space id number
6776 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6777 stw r29,mpPAddr(r31) ; Set mapping's physical address
6778 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6779 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6780 b gadChain ; Continue with chaining mapping to physent
6781 gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6783 gadChain: mr r3,r29 ; r3 <- physical frame address
6784 bl mapFindLockPN ; Find 'n' lock this page's physent
6785 mr. r26,r3 ; Got lock on our physent?
6786 beq-- gadBadPLock ; No, time to bail out
6788 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6789 lwz r12,ppLink+4(r26) ; Get forward chain
6790 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6791 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6792 stw r11,mpAlias+4(r31) ; New mapping will head chain
6793 stw r12,ppLink+4(r26) ; Point physent to new mapping
6794 b gadFinish ; All over now...
6796 gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6797 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6798 ld r12,ppLink(r26) ; Get forward chain
6799 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6800 and r12,r12,r7 ; Isolate pointer's flags
6801 or r12,r12,r31 ; Insert new mapping's address forming pointer
6802 std r11,mpAlias(r31) ; New mapping will head chain
6803 std r12,ppLink(r26) ; Point physent to new mapping
6805 gadFinish: eieio ; Ensure new mapping is completely visible
6807 gadRelPhy: mr r3,r26 ; r3 <- physent addr
6808 bl mapPhysUnlock ; Unlock physent chain
6810 gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6811 bl sxlkUnlock ; Release host pmap search lock
6813 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6814 mtmsr r25 ; Restore 'rupts, translation
6815 isync ; Throw a small wrench into the pipeline
6816 b gadPopFrame ; Nothing to do now but pop a frame and return
6817 gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6819 lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6820 ; Get caller's return address
6821 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6822 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6823 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6824 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6825 mtlr r0 ; Prepare return address
6826 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6827 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6828 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6829 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6830 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6831 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6832 lwz r1,0(r1) ; Pop stack frame
6833 blr ; Return to caller
6837 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6838 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6839 li r3,failMapping ; The BOMB, Dmitri.
6840 sc ; The hydrogen bomb.
6844 ; Guest shadow assist -- suspend a guest mapping
6846 ; Suspends a guest mapping.
6849 ; r3 : address of host pmap, 32-bit kernel virtual address
6850 ; r4 : address of guest pmap, 32-bit kernel virtual address
6851 ; r5 : guest virtual address, high-order 32 bits
6852 ; r6 : guest virtual address, low-order 32 bits
6854 ; Non-volatile register usage:
6855 ; r26 : VMM extension block's physical address
6856 ; r27 : host pmap physical address
6857 ; r28 : guest pmap physical address
6858 ; r29 : caller's msr image from mapSetUp
6859 ; r30 : guest virtual address
6860 ; r31 : gva->phys mapping's physical address
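;
;		Illustrative sketch (hypothetical names) of the net effect of a successful
;		suspend, mirroring the assembly below:
;
;			mp = find_guest_mapping(guest_space_id, gva);
;			if (mp != NULL) {
;				invalidate_pte(mp);			/* mapInvPte32/64 path    */
;				mp->mpFlags |= mpgDormant;		/* mark the entry dormant */
;			}
;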
6864 .globl EXT(hw_susp_map_gv)
6866 LEXT(hw_susp_map_gv)
6868 #define gsuStackSize ((31-26+1)*4)+4
6870 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6871 ; Mint a new stack frame
6872 mflr r0 ; Get caller's return address
6873 mfsprg r11,2 ; Get feature flags
6874 mtcrf 0x02,r11 ; Insert feature flags into cr6
6875 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6876 ; Save caller's return address
6877 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6878 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6879 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6880 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6881 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6882 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6884 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6886 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6887 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6888 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6890 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6891 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6892 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6893 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6894 srwi r11,r30,12 ; Form shadow hash:
6895 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6896 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6897 ; Form index offset from hash page number
6898 add r31,r31,r10 ; r31 <- hash page index entry
6899 lwz r31,4(r31) ; r31 <- hash page paddr
6900 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6901 ; r31 <- hash group paddr
6902 b gsuStart ; Get to it
6903 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6904 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6905 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6906 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6907 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6908 srwi r11,r30,12 ; Form shadow hash:
6909 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6910 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6911 ; Form index offset from hash page number
6912 add r31,r31,r10 ; r31 <- hash page index entry
6913 ld r31,0(r31) ; r31 <- hash page paddr
6914 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6915 ; r31 <- hash group paddr
6917 gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6918 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6919 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6920 mr r29,r11 ; Save caller's msr image
6922 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6923 bl sxlkExclusive ; Get lock exclusive
6925 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6926 mtctr r0 ; in this group
6927 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6929 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6930 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6931 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6932 b gsu32SrchLp ; Let the search begin!
6936 mr r6,r3 ; r6 <- current mapping slot's flags
6937 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6938 mr r7,r4 ; r7 <- current mapping slot's space ID
6939 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6940 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6941 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6942 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6943 xor r7,r7,r9 ; Compare space ID
6944 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6945 xor r8,r8,r30 ; Compare virtual address
6946 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6947 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6949 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6950 bdnz gsu32SrchLp ; Iterate
6952 mr r6,r3 ; r6 <- current mapping slot's flags
6953 clrrwi r5,r5,12 ; Remove flags from virtual address
6954 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6955 xor r4,r4,r9 ; Compare space ID
6956 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6957 xor r5,r5,r30 ; Compare virtual address
6958 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6959 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6960 b gsuSrchMiss ; No joy in our hash group
6963 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6964 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6965 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6966 b gsu64SrchLp ; Let the search begin!
6970 mr r6,r3 ; r6 <- current mapping slot's flags
6971 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6972 mr r7,r4 ; r7 <- current mapping slot's space ID
6973 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6974 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6975 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6976 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6977 xor r7,r7,r9 ; Compare space ID
6978 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6979 xor r8,r8,r30 ; Compare virtual address
6980 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6981 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6983 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6984 bdnz gsu64SrchLp ; Iterate
6986 mr r6,r3 ; r6 <- current mapping slot's flags
6987 clrrdi r5,r5,12 ; Remove flags from virtual address
6988 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6989 xor r4,r4,r9 ; Compare space ID
6990 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6991 xor r5,r5,r30 ; Compare virtual address
6992 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6993 bne gsuSrchMiss ; No joy in our hash group
6996 bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6997 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6998 ; r31 <- mapping's physical address
6999 ; r3 -> PTE slot physical address
7000 ; r4 -> High-order 32 bits of PTE
7001 ; r5 -> Low-order 32 bits of PTE
7003 ; r7 -> PCA physical address
7004 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7005 b gsuFreePTE ; Join 64-bit path to release the PTE
7006 gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7007 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7008 gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
7009 beq- gsuNoPTE ; No valid PTE, we're almost done
7010 lis r0,0x8000 ; Prepare free bit for this slot
7011 srw r0,r0,r2 ; Position free bit
7012 or r6,r6,r0 ; Set it in our PCA image
7013 lwz r8,mpPte(r31) ; Get PTE pointer
7014 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7015 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7016 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7017 stw r6,0(r7) ; Update PCA and unlock the PTEG
7019 gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7020 ori r3,r3,mpgDormant ; Mark entry dormant
7021 stw r3,mpFlags(r31) ; Save updated flags
7022 eieio ; Ensure update is visible when we unlock
7025 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7026 bl sxlkUnlock ; Release host pmap search lock
7028 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7029 mtmsr r29 ; Restore 'rupts, translation
7030 isync ; Throw a small wrench into the pipeline
7031 b gsuPopFrame ; Nothing to do now but pop a frame and return
7032 gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7034 lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7035 ; Get caller's return address
7036 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7037 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7038 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7039 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7040 mtlr r0 ; Prepare return address
7041 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7042 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7043 lwz r1,0(r1) ; Pop stack frame
7044 blr ; Return to caller
7047 ; Guest shadow assist -- test guest mapping reference and change bits
7049 ; Locates the specified guest mapping, and if it exists gathers its reference
7050 ; and change bits, optionally resetting them.
7053 ; r3 : address of host pmap, 32-bit kernel virtual address
7054 ; r4 : address of guest pmap, 32-bit kernel virtual address
7055 ; r5 : guest virtual address, high-order 32 bits
7056 ; r6 : guest virtual address, low-order 32 bits
7057 ; r7 : reset boolean
7059 ; Non-volatile register usage:
7060 ; r24 : VMM extension block's physical address
7061 ; r25 : return code (w/reference and change bits)
7062 ; r26 : reset boolean
7063 ; r27 : host pmap physical address
7064 ; r28 : guest pmap physical address
7065 ; r29 : caller's msr image from mapSetUp
7066 ; r30 : guest virtual address
7067 ; r31 : gva->phys mapping's physical address
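;
;		Illustrative sketch (hypothetical names) of the reference/change gathering
;		performed below once the guest mapping is found:
;
;			rc = pte_low & (mpR | mpC);			/* gather R and C bits     */
;			if (reset) {
;				mapping->vaddr_flags &= ~(mpR | mpC);	/* clear mapping's copy    */
;				pte_low              &= ~(mpR | mpC);	/* clear (and rewrite) PTE */
;			}
;			return found ? rc : mapRtNotFnd;
;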
7071 .globl EXT(hw_test_rc_gv)
7075 #define gtdStackSize ((31-24+1)*4)+4
7077 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7078 ; Mint a new stack frame
7079 mflr r0 ; Get caller's return address
7080 mfsprg r11,2 ; Get feature flags
7081 mtcrf 0x02,r11 ; Insert feature flags into cr6
7082 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7083 ; Save caller's return address
7084 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7085 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7086 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7087 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7088 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7089 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7090 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7091 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7093 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7095 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7096 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
7098 bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
7100 lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
7101 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
7102 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
7103 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7104 srwi r11,r30,12 ; Form shadow hash:
7105 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7106 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7107 ; Form index offset from hash page number
7108 add r31,r31,r10 ; r31 <- hash page index entry
7109 lwz r31,4(r31) ; r31 <- hash page paddr
7110 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7111 ; r31 <- hash group paddr
7112 b gtdStart ; Get to it
7114 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7115 ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7116 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
7117 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
7118 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7119 srwi r11,r30,12 ; Form shadow hash:
7120 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7121 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7122 ; Form index offset from hash page number
7123 add r31,r31,r10 ; r31 <- hash page index entry
7124 ld r31,0(r31) ; r31 <- hash page paddr
7125 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7126 ; r31 <- hash group paddr
7128 gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
7129 xor r28,r4,r28 ; Convert guest pmap_t virt->real
7130 mr r26,r7 ; Save reset boolean
7131 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7132 mr r29,r11 ; Save caller's msr image
7134 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7135 bl sxlkExclusive ; Get lock exclusive
7137 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7138 mtctr r0 ; in this group
7139 bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
7141 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7142 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7143 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7144 b gtd32SrchLp ; Let the search begin!
7148 mr r6,r3 ; r6 <- current mapping slot's flags
7149 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7150 mr r7,r4 ; r7 <- current mapping slot's space ID
7151 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7152 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7153 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7154 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7155 xor r7,r7,r9 ; Compare space ID
7156 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7157 xor r8,r8,r30 ; Compare virtual address
7158 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7159 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7161 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7162 bdnz gtd32SrchLp ; Iterate
7164 mr r6,r3 ; r6 <- current mapping slot's flags
7165 clrrwi r5,r5,12 ; Remove flags from virtual address
7166 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7167 xor r4,r4,r9 ; Compare space ID
7168 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7169 xor r5,r5,r30 ; Compare virtual address
7170 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7171 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7172 b gtdSrchMiss ; No joy in our hash group
7175 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7176 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7177 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7178 b gtd64SrchLp ; Let the search begin!
7182 mr r6,r3 ; r6 <- current mapping slot's flags
7183 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7184 mr r7,r4 ; r7 <- current mapping slot's space ID
7185 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7186 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7187 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7188 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7189 xor r7,r7,r9 ; Compare space ID
7190 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7191 xor r8,r8,r30 ; Compare virtual address
7192 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7193 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7195 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7196 bdnz gtd64SrchLp ; Iterate
7198 mr r6,r3 ; r6 <- current mapping slot's flags
7199 clrrdi r5,r5,12 ; Remove flags from virtual address
7200 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7201 xor r4,r4,r9 ; Compare space ID
7202 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7203 xor r5,r5,r30 ; Compare virtual address
7204 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7205 bne gtdSrchMiss ; No joy in our hash group
7208 bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
7210 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
7212 cmplwi cr1,r26,0 ; Do we want to clear RC?
7213 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7214 mr. r3,r3 ; Was there a previously valid PTE?
7215 li r0,lo16(mpR|mpC) ; Get bits to clear
7217 and r25,r5,r0 ; Copy RC bits into result
7218 beq++ cr1,gtdNoClr32 ; Nope...
7220 andc r12,r12,r0 ; Clear mapping copy of RC
7221 andc r5,r5,r0 ; Clear PTE copy of RC
7222 sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
7224 gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
7226 sth r5,6(r3) ; Store updated RC in PTE
7227 eieio ; Make sure we do not reorder
7228 stw r4,0(r3) ; Revalidate the PTE
7230 eieio ; Make sure all updates come first
7231 stw r6,0(r7) ; Unlock PCA
7233 gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7234 bl sxlkUnlock ; Unlock the search list
7235 b gtdR32 ; Join common...
7240 gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
7242 cmplwi cr1,r26,0 ; Do we want to clear RC?
7243 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7244 mr. r3,r3 ; Was there a previously valid PTE?
7245 li r0,lo16(mpR|mpC) ; Get bits to clear
7247 and r25,r5,r0 ; Copy RC bits into result
7248 beq++ cr1,gtdNoClr64 ; Nope...
7250 andc r12,r12,r0 ; Clear mapping copy of RC
7251 andc r5,r5,r0 ; Clear PTE copy of RC
7252 sth r12,mpVAddr+6(r31) ; Set the new RC
7254 gtdNoClr64: beq-- gtdNoOld64 ; Nope, no previous PTE...
7256 sth r5,14(r3) ; Store updated RC
7257 eieio ; Make sure we do not reorder
7258 std r4,0(r3) ; Revalidate the PTE
7260 eieio ; Make sure all updates come first
7261 stw r6,0(r7) ; Unlock PCA
7263 gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7264 bl sxlkUnlock ; Unlock the search list
7265 b gtdR64 ; Join common...
7268 la r3,pmapSXlk(r27) ; Point to the pmap search lock
7269 bl sxlkUnlock ; Unlock the search list
7270 li r25,mapRtNotFnd ; Get ready to return not found
7271 bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
7273 gtdR32: mtmsr r29 ; Restore caller's msr image
7277 gtdR64: mtmsrd r29 ; Restore caller's msr image
7279 gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7280 ; Get caller's return address
7281 mr r3,r25 ; Get return code
7282 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7283 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7284 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7285 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7286 mtlr r0 ; Prepare return address
7287 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7288 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7289 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7290 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7291 lwz r1,0(r1) ; Pop stack frame
7292 blr ; Return to caller
7295 ; Guest shadow assist -- convert guest to host virtual address
7297 ; Locates the specified guest mapping, and if it exists locates the
7298 ; first mapping belonging to its host on the physical chain and returns
7299 ; its virtual address.
7301 ; Note that if there are multiple mappings belonging to this host
7302 ; chained to the physent to which the guest mapping is chained, then
7303 ; host virtual aliases exist for this physical address. If host aliases
7304 ; exist, then we select the first on the physent chain, making it
7305 ; unpredictable which of the two or more possible host virtual addresses will be returned.
7309 ; r3 : address of guest pmap, 32-bit kernel virtual address
7310 ; r4 : guest virtual address, high-order 32 bits
7311 ; r5 : guest virtual address, low-order 32 bits
7313 ; Non-volatile register usage:
7314 ; r24 : physent physical address
7315 ; r25 : VMM extension block's physical address
7316 ; r26 : host virtual address
7317 ; r27 : host pmap physical address
7318 ; r28 : guest pmap physical address
7319 ; r29 : caller's msr image from mapSetUp
7320 ; r30 : guest virtual address
7321 ; r31 : gva->phys mapping's physical address
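;
;		Illustrative sketch (hypothetical names) of the translation performed below:
;
;			gmp = find_guest_mapping(guest_space_id, gva);
;			if (gmp == NULL) return (addr64_t)-1;
;			for (mp = physent_of(gmp)->first_alias; mp != NULL; mp = mp->next_alias)
;				if (mp->type == mpNormal && mp->space == host_space_id)
;					return mp->vaddr & ~mpHWFlags;	/* first host alias wins */
;			return (addr64_t)-1;
;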
7325 .globl EXT(hw_gva_to_hva)
7329 #define gthStackSize ((31-24+1)*4)+4
7331 stwu r1,-(FM_ALIGN(gthStackSize)+FM_SIZE)(r1)
7332 ; Mint a new stack frame
7333 mflr r0 ; Get caller's return address
7334 mfsprg r11,2 ; Get feature flags
7335 mtcrf 0x02,r11 ; Insert feature flags into cr6
7336 stw r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7337 ; Save caller's return address
7338 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7339 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7340 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7341 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7342 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7343 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7344 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7345 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7347 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7349 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7350 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7352 bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
7354 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7355 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7356 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7357 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7358 srwi r11,r30,12 ; Form shadow hash:
7359 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7360 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7361 ; Form index offset from hash page number
7362 add r31,r31,r10 ; r31 <- hash page index entry
7363 lwz r31,4(r31) ; r31 <- hash page paddr
7364 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7365 ; r31 <- hash group paddr
7366 b gthStart ; Get to it
7368 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7369 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7370 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7371 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7372 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7373 srwi r11,r30,12 ; Form shadow hash:
7374 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7375 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7376 ; Form index offset from hash page number
7377 add r31,r31,r10 ; r31 <- hash page index entry
7378 ld r31,0(r31) ; r31 <- hash page paddr
7379 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7380 ; r31 <- hash group paddr
7382 gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7383 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7384 mr r29,r11 ; Save caller's msr image
7386 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7387 bl sxlkExclusive ; Get lock exclusive
7389 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7390 mtctr r0 ; in this group
7391 bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
7393 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7394 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7395 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7396 b gth32SrchLp ; Let the search begin!
7400 mr r6,r3 ; r6 <- current mapping slot's flags
7401 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7402 mr r7,r4 ; r7 <- current mapping slot's space ID
7403 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7404 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7405 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7406 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7407 xor r7,r7,r9 ; Compare space ID
7408 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7409 xor r8,r8,r30 ; Compare virtual address
7410 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7411 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7413 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7414 bdnz gth32SrchLp ; Iterate
7416 mr r6,r3 ; r6 <- current mapping slot's flags
7417 clrrwi r5,r5,12 ; Remove flags from virtual address
7418 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7419 xor r4,r4,r9 ; Compare space ID
7420 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7421 xor r5,r5,r30 ; Compare virtual address
7422 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7423 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7424 b gthSrchMiss ; No joy in our hash group
7427 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7428 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7429 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7430 b gth64SrchLp ; Let the search begin!
7434 mr r6,r3 ; r6 <- current mapping slot's flags
7435 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7436 mr r7,r4 ; r7 <- current mapping slot's space ID
7437 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7438 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7439 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7440 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7441 xor r7,r7,r9 ; Compare space ID
7442 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7443 xor r8,r8,r30 ; Compare virtual address
7444 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7445 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7447 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7448 bdnz gth64SrchLp ; Iterate
7450 mr r6,r3 ; r6 <- current mapping slot's flags
7451 clrrdi r5,r5,12 ; Remove flags from virtual address
7452 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7453 xor r4,r4,r9 ; Compare space ID
7454 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7455 xor r5,r5,r30 ; Compare virtual address
7456 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7457 bne gthSrchMiss ; No joy in our hash group
7459 gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
7460 bl mapFindLockPN ; Find 'n' lock this page's physent
7461 mr. r24,r3 ; Got lock on our physent?
7462 beq-- gthBadPLock ; No, time to bail out
7464 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7466 lwz r9,ppLink+4(r24) ; Get first mapping on physent
7467 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7468 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
7469 gthPELoop: mr. r12,r9 ; Got a mapping to look at?
7470 beq- gthPEMiss ; Nope, we've missed hva->phys mapping
7471 lwz r7,mpFlags(r12) ; Get mapping's flags
7472 lhz r4,mpSpace(r12) ; Get mapping's space id number
7473 lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
7474 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
7476 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7477 rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
7478 xori r0,r0,mpNormal ; Normal mapping?
7479 xor r4,r4,r6 ; Compare w/ host space id number
7480 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7482 b gthPELoop ; Iterate
7484 gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
7485 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
7486 ld r9,ppLink(r24) ; Get first mapping on physent
7487 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7488 andc r9,r9,r0 ; Cleanup mapping pointer
7489 gthPELp64: mr. r12,r9 ; Got a mapping to look at?
7490 beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
7491 lwz r7,mpFlags(r12) ; Get mapping's flags
7492 lhz r4,mpSpace(r12) ; Get mapping's space id number
7493 ld r26,mpVAddr(r12) ; Get mapping's virtual address
7494 ld r9,mpAlias(r12) ; Next mapping physent alias chain
7495 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7496 rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
7497 xori r0,r0,mpNormal ; Normal mapping?
7498 xor r4,r4,r6 ; Compare w/ host space id number
7499 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7501 b gthPELp64 ; Iterate
7504 gthPEMiss: mr r3,r24 ; Get physent's address
7505 bl mapPhysUnlock ; Unlock physent chain
7507 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7508 bl sxlkUnlock ; Release host pmap search lock
7509 li r3,-1 ; Return 64-bit -1
7511 bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
7512 b gthEpi32 ; Take 32-bit exit
7515 gthPEHit: mr r3,r24 ; Get physent's address
7516 bl mapPhysUnlock ; Unlock physent chain
7517 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7518 bl sxlkUnlock ; Release host pmap search lock
7520 bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
7522 gthR32: li r3,0 ; High-order 32 bits host virtual address
7523 mr r4,r26 ; Low-order 32 bits host virtual address
7524 gthEpi32: mtmsr r29 ; Restore caller's msr image
7529 gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
7530 clrldi r4,r26,32 ; Low-order 32 bits host virtual address
7531 gthEpi64: mtmsrd r29 ; Restore caller's msr image
7533 gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7534 ; Get caller's return address
7535 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7536 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7537 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7538 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7539 mtlr r0 ; Prepare return address
7540 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7541 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7542 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7543 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7544 lwz r1,0(r1) ; Pop stack frame
7545 blr ; Return to caller
7548 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
7549 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7550 li r3,failMapping ; The BOMB, Dmitri.
7551 sc ; The hydrogen bomb.
7555 ; Guest shadow assist -- find a guest mapping
7557 ; Locates the specified guest mapping, and if it exists returns a copy of it.
7561 ; r3 : address of guest pmap, 32-bit kernel virtual address
7562 ; r4 : guest virtual address, high-order 32 bits
7563 ; r5 : guest virtual address, low-order 32 bits
7564 ; r6 : 32 byte copy area, 32-bit kernel virtual address
7566 ; Non-volatile register usage:
7567 ; r25 : VMM extension block's physical address
7568 ; r26 : copy area virtual address
7569 ; r27 : host pmap physical address
7570 ; r28 : guest pmap physical address
7571 ; r29 : caller's msr image from mapSetUp
7572 ; r30 : guest virtual address
7573 ; r31 : gva->phys mapping's physical address
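;
;		Illustrative sketch (hypothetical names): on a hit the mapping entry is
;		copied out to the caller's 32-byte buffer (r6):
;
;			mp = find_guest_mapping(guest_space_id, gva);
;			if (mp != NULL) memcpy(copy_area, mp, 32);
;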
7577 .globl EXT(hw_find_map_gv)
7579 LEXT(hw_find_map_gv)
7581 #define gfmStackSize ((31-25+1)*4)+4
7583 stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
7584 ; Mint a new stack frame
7585 mflr r0 ; Get caller's return address
7586 mfsprg r11,2 ; Get feature flags
7587 mtcrf 0x02,r11 ; Insert feature flags into cr6
7588 stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7589 ; Save caller's return address
7590 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7591 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7592 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7593 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7594 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7595 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7596 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7598 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7599 mr r26,r6 ; Copy copy buffer vaddr
7601 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7602 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7604 bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
7606 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7607 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7608 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7609 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7610 srwi r11,r30,12 ; Form shadow hash:
7611 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7612 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7613 ; Form index offset from hash page number
7614 add r31,r31,r10 ; r31 <- hash page index entry
7615 lwz r31,4(r31) ; r31 <- hash page paddr
7616 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7617 ; r31 <- hash group paddr
7618 b gfmStart ; Get to it
7620 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7621 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7622 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7623 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7624 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7625 srwi r11,r30,12 ; Form shadow hash:
7626 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7627 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7628 ; Form index offset from hash page number
7629 add r31,r31,r10 ; r31 <- hash page index entry
7630 ld r31,0(r31) ; r31 <- hash page paddr
7631 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7632 ; r31 <- hash group paddr
7634 gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7635 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7636 mr r29,r11 ; Save caller's msr image
7638 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7639 bl sxlkExclusive ; Get lock exclusive
7641 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7642 mtctr r0 ; in this group
7643 bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
7645 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7646 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7647 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7648 b gfm32SrchLp ; Let the search begin!
7651 gfm32SrchLp:
7652 mr r6,r3 ; r6 <- current mapping slot's flags
7653 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7654 mr r7,r4 ; r7 <- current mapping slot's space ID
7655 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7656 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7657 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7658 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7659 xor r7,r7,r9 ; Compare space ID
7660 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7661 xor r8,r8,r30 ; Compare virtual address
7662 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7663 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7665 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7666 bdnz gfm32SrchLp ; Iterate
7668 mr r6,r3 ; r6 <- current mapping slot's flags
7669 clrrwi r5,r5,12 ; Remove flags from virtual address
7670 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7671 xor r4,r4,r9 ; Compare space ID
7672 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7673 xor r5,r5,r30 ; Compare virtual address
7674 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7675 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7676 b gfmSrchMiss ; No joy in our hash group
7678 gfm64Search:
7679 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7680 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7681 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7682 b gfm64SrchLp ; Let the search begin!
7685 gfm64SrchLp:
7686 mr r6,r3 ; r6 <- current mapping slot's flags
7687 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7688 mr r7,r4 ; r7 <- current mapping slot's space ID
7689 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7690 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7691 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7692 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7693 xor r7,r7,r9 ; Compare space ID
7694 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7695 xor r8,r8,r30 ; Compare virtual address
7696 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7697 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7699 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7700 bdnz gfm64SrchLp ; Iterate
7702 mr r6,r3 ; r6 <- current mapping slot's flags
7703 clrrdi r5,r5,12 ; Remove flags from virtual address
7704 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7705 xor r4,r4,r9 ; Compare space ID
7706 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7707 xor r5,r5,r30 ; Compare virtual address
7708 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7709 bne gfmSrchMiss ; No joy in our hash group
7711 gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
7712 lwz r6,4(r31) ; +4
7713 lwz r7,8(r31) ; +8
7714 lwz r8,12(r31) ; +12
7715 lwz r9,16(r31) ; +16
7716 lwz r10,20(r31) ; +20
7717 lwz r11,24(r31) ; +24
7718 lwz r12,28(r31) ; +28
7720 li r31,mapRtOK ; Return found mapping
7722 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7723 bl sxlkUnlock ; Release host pmap search lock
7725 bt++ pf64Bitb,gfmEpi64 ; Test for 64-bit machine
7727 gfmEpi32: mtmsr r29 ; Restore caller's msr image
7728 isync ; A small wrench
7729 b gfmEpilog ; and a larger bubble
7732 gfmEpi64: mtmsrd r29 ; Restore caller's msr image
7734 gfmEpilog: mr. r3,r31 ; Copy/test mapping address
7735 beq gfmNotFound ; Skip copy if no mapping found
7737 stw r5,0(r26) ; Store 32 bytes of mapping into virtual
7738 stw r6,4(r26) ; +4
7739 stw r7,8(r26) ; +8
7740 stw r8,12(r26) ; +12
7741 stw r9,16(r26) ; +16
7742 stw r10,20(r26) ; +20
7743 stw r11,24(r26) ; +24
7744 stw r12,28(r26) ; +28
7746 gfmNotFound:
7747 lwz r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7748 ; Get caller's return address
7749 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7750 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7751 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7752 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7753 mtlr r0 ; Prepare return address
7754 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7755 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7756 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7757 lwz r1,0(r1) ; Pop stack frame
7758 blr ; Return to caller
7761 gfmSrchMiss:
7762 li r31,mapRtNotFnd ; Indicate mapping not found
7763 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7764 bl sxlkUnlock ; Release host pmap search lock
7765 bt++ pf64Bitb,gfmEpi64 ; Take 64-bit exit
7766 b gfmEpi32 ; Take 32-bit exit
7770 ; Guest shadow assist -- change guest page protection
7772 ; Locates the specified guest mapping, and if it is active (neither free nor dormant), changes its protection
7776 ; r3 : address of guest pmap, 32-bit kernel virtual address
7777 ; r4 : guest virtual address, high-order 32 bits
7778 ; r5 : guest virtual address, low-order 32 bits
7779 ; r6 : guest mapping protection code
7781 ; Non-volatile register usage:
7782 ; r25 : caller's msr image from mapSetUp
7783 ; r26 : guest mapping protection code
7784 ; r27 : host pmap physical address
7785 ; r28 : guest pmap physical address
7786 ; r29 : VMM extension block's physical address
7787 ; r30 : guest virtual address
7788 ; r31 : gva->phys mapping's physical address
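;
; Sketched in C, the overall flow is: look the mapping up (as in the gv_find
; sketch above), disconnect any live PTE, then rewrite the PP bits kept in the
; low word of mpVAddr.  Illustrative only; PP_MASK, the result codes, and
; pte_invalidate_if_present() are placeholders, not definitions from this file.
;
;   #define PP_MASK 0x3ULL                       /* assumed width of PP field */
;   extern void pte_invalidate_if_present(gv_map *m);  /* cf. mapInvPte32/64  */
;
;   int gv_protect(gv_map *m, uint64_t new_pp)
;   {
;       if (m == NULL)
;           return 1;                            /* "not found" placeholder   */
;       pte_invalidate_if_present(m);            /* flush the hardware PTE    */
;       m->vaddr = (m->vaddr & ~PP_MASK) | (new_pp & PP_MASK);
;       return 0;                                /* "mapRtOK" style success   */
;   }
;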
7791 .globl EXT(hw_protect_gv)
7793 LEXT(hw_protect_gv)
7795 #define gcpStackSize ((31-24+1)*4)+4
7797 stwu r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
7798 ; Mint a new stack frame
7799 mflr r0 ; Get caller's return address
7800 mfsprg r11,2 ; Get feature flags
7801 mtcrf 0x02,r11 ; Insert feature flags into cr6
7802 stw r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7803 ; Save caller's return address
7804 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7805 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7806 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7807 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7808 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7809 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7810 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7812 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7813 mr r26,r6 ; Copy guest mapping protection code
7815 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7816 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7817 bt++ pf64Bitb,gcp64Salt ; Handle 64-bit machine separately
7818 lwz r29,pmapVmmExtPhys+4(r3) ; r29 <- VMM pmap extension block paddr
7819 lwz r27,vmxHostPmapPhys+4(r11) ; r27 <- host pmap paddr
7820 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7821 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7822 srwi r11,r30,12 ; Form shadow hash:
7823 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7824 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7825 ; Form index offset from hash page number
7826 add r31,r31,r10 ; r31 <- hash page index entry
7827 lwz r31,4(r31) ; r31 <- hash page paddr
7828 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7829 ; r31 <- hash group paddr
7830 b gcpStart ; Get to it
7832 gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7833 ld r29,pmapVmmExtPhys(r3) ; r29 <- VMM pmap extension block paddr
7834 ld r27,vmxHostPmapPhys(r11) ; r27 <- host pmap paddr
7835 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7836 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7837 srwi r11,r30,12 ; Form shadow hash:
7838 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7839 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7840 ; Form index offset from hash page number
7841 add r31,r31,r10 ; r31 <- hash page index entry
7842 ld r31,0(r31) ; r31 <- hash page paddr
7843 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7844 ; r31 <- hash group paddr
7846 gcpStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7847 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7848 mr r25,r11 ; Save caller's msr image
7850 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7851 bl sxlkExclusive ; Get lock exclusive
7853 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7854 mtctr r0 ; in this group
7855 bt++ pf64Bitb,gcp64Search ; Test for 64-bit machine
7857 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7858 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7859 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7860 b gcp32SrchLp ; Let the search begin!
7863 gcp32SrchLp:
7864 mr r6,r3 ; r6 <- current mapping slot's flags
7865 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7866 mr r7,r4 ; r7 <- current mapping slot's space ID
7867 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7868 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7869 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7870 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7871 xor r7,r7,r9 ; Compare space ID
7872 or r0,r11,r7 ; r0 <- free || dormant || !space match
7873 xor r8,r8,r30 ; Compare virtual address
7874 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7875 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7877 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7878 bdnz gcp32SrchLp ; Iterate
7880 mr r6,r3 ; r6 <- current mapping slot's flags
7881 clrrwi r5,r5,12 ; Remove flags from virtual address
7882 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7883 xor r4,r4,r9 ; Compare space ID
7884 or r0,r11,r4 ; r0 <- free || dormant || !space match
7885 xor r5,r5,r30 ; Compare virtual address
7886 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7887 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7888 b gcpSrchMiss ; No joy in our hash group
7890 gcp64Search:
7891 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7892 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7893 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7894 b gcp64SrchLp ; Let the search begin!
7897 gcp64SrchLp:
7898 mr r6,r3 ; r6 <- current mapping slot's flags
7899 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7900 mr r7,r4 ; r7 <- current mapping slot's space ID
7901 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7902 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7903 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7904 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7905 xor r7,r7,r9 ; Compare space ID
7906 or r0,r11,r7 ; r0 <- free || dormant || !space match
7907 xor r8,r8,r30 ; Compare virtual address
7908 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7909 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7911 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7912 bdnz gcp64SrchLp ; Iterate
7914 mr r6,r3 ; r6 <- current mapping slot's flags
7915 clrrdi r5,r5,12 ; Remove flags from virtual address
7916 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7917 xor r4,r4,r9 ; Compare space ID
7918 or r0,r11,r4 ; r0 <- free || dormant || !space match
7919 xor r5,r5,r30 ; Compare virtual address
7920 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7921 bne gcpSrchMiss ; No joy in our hash group
7923 gcpSrchHit:
7924 bt++ pf64Bitb,gcpDscon64 ; Handle 64-bit disconnect separately
7925 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
7926 ; r31 <- mapping's physical address
7927 ; r3 -> PTE slot physical address
7928 ; r4 -> High-order 32 bits of PTE
7929 ; r5 -> Low-order 32 bits of PTE
7931 ; r7 -> PCA physical address
7932 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7933 b gcpFreePTE ; Join 64-bit path to release the PTE
7934 gcpDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7935 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7936 gcpFreePTE: mr. r3,r3 ; Was there a valid PTE?
7937 beq- gcpSetKey ; No valid PTE, we're almost done
7938 lis r0,0x8000 ; Prepare free bit for this slot
7939 srw r0,r0,r2 ; Position free bit
7940 or r6,r6,r0 ; Set it in our PCA image
7941 lwz r8,mpPte(r31) ; Get PTE pointer
7942 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7943 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7944 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7945 stw r6,0(r7) ; Update PCA and unlock the PTEG
7947 gcpSetKey: lwz r0,mpVAddr+4(r31) ; Get va word containing protection bits
7948 rlwimi r0,r26,0,mpPP ; Insert new protection bits
7949 stw r0,mpVAddr+4(r31) ; Write 'em back
7950 eieio ; Ensure previous mapping updates are visible
7951 li r31,mapRtOK ; I'm a success
7953 gcpRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7954 bl sxlkUnlock ; Release host pmap search lock
7956 mr r3,r31 ; r3 <- result code
7957 bt++ pf64Bitb,gcpRtn64 ; Handle 64-bit separately
7958 mtmsr r25 ; Restore 'rupts, translation
7959 isync ; Throw a small wrench into the pipeline
7960 b gcpPopFrame ; Nothing to do now but pop a frame and return
7961 gcpRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
7962 gcpPopFrame:
7963 lwz r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7964 ; Get caller's return address
7965 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7966 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7967 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7968 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7969 mtlr r0 ; Prepare return address
7970 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7971 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7972 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7973 lwz r1,0(r1) ; Pop stack frame
7974 blr ; Return to caller
7977 gcpSrchMiss:
7978 li r31,mapRtNotFnd ; Could not locate requested mapping
7979 b gcpRelPmap ; Exit through host pmap search lock release
7983 ; Find the physent based on a physical page and try to lock it (but not too hard)
7984 ; Note that this table always has an entry with a 0 table pointer at the end
7986 ; R3 contains ppnum on entry
7987 ; R3 is 0 if no entry was found
7988 ; R3 is physent if found
7989 ; cr0_eq is true if lock was obtained or there was no entry to lock
7990 ; cr0_eq is false if there was an entry and it was locked
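;
; A rough C equivalent of the scan and try-lock below (illustrative only; the
; region and physent layouts, and the 0x80000000 lock bit, are assumptions):
;
;   #include <stdint.h>
;   #include <stddef.h>
;
;   typedef struct mem_region {
;       uint32_t *phys_tab;          /* NULL table pointer marks the end      */
;       uint32_t  start, end;        /* ppnum range covered by this bank      */
;   } mem_region;
;
;   /* Returns the physent, or NULL if ppnum is in no bank; *locked reports
;      whether the single try-lock attempt succeeded.                         */
;   uint32_t *phys_find_trylock(mem_region *tab, uint32_t ppnum, int *locked)
;   {
;       for (; tab->phys_tab != NULL; tab++) {
;           if (ppnum < tab->start || ppnum > tab->end)
;               continue;
;           uint32_t *pe  = tab->phys_tab + 2 * (ppnum - tab->start); /* 8B/entry */
;           uint32_t  old = *pe;
;           *locked = (old & 0x80000000u) == 0 &&
;                     __sync_bool_compare_and_swap(pe, old, old | 0x80000000u);
;           return pe;
;       }
;       *locked = 1;                 /* nothing to lock counts as "obtained"  */
;       return NULL;
;   }
;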
7996 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7997 mr r2,r3 ; Save our target
7998 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
8000 mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
8001 lwz r5,mrStart(r9) ; Get start of table entry
8002 lwz r0,mrEnd(r9) ; Get end of table entry
8003 addi r9,r9,mrSize ; Point to the next slot
8004 cmplwi cr2,r3,0 ; Are we at the end of the table?
8005 cmplw r2,r5 ; See if we are in this table
8006 cmplw cr1,r2,r0 ; Check end also
8007 sub r4,r2,r5 ; Calculate index to physical entry
8008 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
8009 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
8010 slwi r4,r4,3 ; Get offset to physical entry
8012 blt-- mapFindPhz ; Did not find it...
8014 add r3,r3,r4 ; Point right to the slot
8016 mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
8017 rlwinm. r0,r2,0,0,0 ; Is it locked?
8018 bnelr-- ; Yes it is...
8020 lwarx r2,0,r3 ; Get the lock
8021 rlwinm. r0,r2,0,0,0 ; Is it locked?
8022 oris r0,r2,0x8000 ; Set the lock bit
8023 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
8024 stwcx. r0,0,r3 ; Try to stuff it back...
8025 bne-- mapFindOv ; Collision, try again...
8026 isync ; Clear any speculations
8029 mapFindKl: li r2,lgKillResv ; Killing field
8030 stwcx. r2,0,r2 ; Trash reservation...
8031 crclr cr0_eq ; Make sure we do not think we got the lock
8034 mapFindNo: crset cr0_eq ; Make sure that we set this
8035 li r3,0 ; Show that we did not find it
8038 ; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
8040 ; How the pmap cache lookup works:
8042 ; We use a combination of three things: a mask of valid entries, a sub-tag, and the
8043 ; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
8044 ; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
8045 ; of the ESID, i.e. bits 32:35 of the effective address for 64-bit and bits 0:3 for 32-bit. The cache
8046 ; entry contains the full 36 bit ESID.
8048 ; The purpose of the sub-tag is to limit the number of searches necessary when looking
8049 ; for an existing cache entry. Because there are 16 slots in the cache, we could end up
8050 ; searching all 16 if a match is not found.
8052 ; Essentially, we will search only the slots that have a valid entry and whose sub-tag
8053 ; matches. More than likely, we will eliminate almost all of the searches.
8057 ; R4 = ESID high half
8058 ; R5 = ESID low half
8061 ; R3 = pmap cache slot if found, 0 if not
8062 ; R10 = pmapCCtl address
8063 ; R11 = pmapCCtl image
8064 ; pmapCCtl locked on exit
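;
; The sub-tag compare below is a nibble-wise SWAR trick: replicate the 4-bit
; sub-tag into every nibble, XNOR against the packed sub-tag array, and reduce
; each all-ones nibble to a single hit bit.  A C sketch (illustrative only;
; the packed layout is as described above, the function name is invented):
;
;   #include <stdint.h>
;
;   /* Bit i of the result is set when cache slot i's packed 4-bit sub-tag
;      equals tag.  Callers still AND this with the valid-slot mask.          */
;   uint16_t subtag_hits(uint64_t subtags, unsigned tag)
;   {
;       uint64_t probe = 0x1111111111111111ULL * (tag & 0xF);  /* replicate   */
;       uint64_t eq    = ~(subtags ^ probe);    /* nibble == 0xF on a match   */
;       eq &= eq >> 1;                          /* AND bit pairs...           */
;       eq &= eq >> 2;                          /* ...bit 0 now ANDs the nibble */
;       uint16_t hits = 0;
;       for (int i = 0; i < 16; i++)
;           hits |= (uint16_t)((eq >> (4 * i)) & 1) << i;
;       return hits;
;   }
;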
8070 la r10,pmapCCtl(r3) ; Point to the segment cache control
8073 lwarx r11,0,r10 ; Get the segment cache control value
8074 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8075 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
8076 bne-- pmapCacheLookur ; Nope...
8077 stwcx. r0,0,r10 ; Try to take the lock
8078 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
8080 isync ; Make sure we get reservation first
8081 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
8082 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
8083 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
8084 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
8085 lis r8,0x8888 ; Get some eights
8086 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
8087 ori r8,r8,0x8888 ; Fill the rest with eights
8089 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
8090 eqv r9,r9,r5 ; Get 0xF where we hit in top half
8092 rlwinm r2,r10,1,0,30 ; Shift over 1
8093 rlwinm r0,r9,1,0,30 ; Shift over 1
8094 and r2,r2,r10 ; AND the even/odd pair into the even
8095 and r0,r0,r9 ; AND the even/odd pair into the even
8096 rlwinm r10,r2,2,0,28 ; Shift over 2
8097 rlwinm r9,r0,2,0,28 ; Shift over 2
8098 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8099 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8101 and r10,r10,r8 ; Clear out extras
8102 and r9,r9,r8 ; Clear out extras
8104 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
8105 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
8106 or r10,r0,r10 ; Merge them
8107 or r9,r2,r9 ; Merge them
8108 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
8109 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
8110 or r10,r0,r10 ; Merge them
8111 or r9,r2,r9 ; Merge them
8112 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
8113 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
8114 not r6,r11 ; Turn invalid into valid
8115 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
8117 la r10,pmapSegCache(r3) ; Point at the cache slots
8118 and. r6,r9,r6 ; Get mask of valid and hit
8120 li r3,0 ; Assume not found
8121 oris r0,r0,0x8000 ; Start a mask
8122 beqlr++ ; Leave, should usually be no hits...
8124 pclNextEnt: cntlzw r5,r6 ; Find an in use one
8125 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
8126 rlwinm r7,r5,4,0,27 ; Index to the cache entry
8127 srw r2,r0,r5 ; Get validity mask bit
8128 add r7,r7,r10 ; Point to the cache slot
8129 andc r6,r6,r2 ; Clear the validity bit we just tried
8130 bgelr-- cr1 ; Leave if there are no more to check...
8132 lwz r5,sgcESID(r7) ; Get the top half
8134 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
8136 bne++ pclNextEnt ; Nope, try again...
8138 mr r3,r7 ; Point to the slot
8144 li r11,lgKillResv ; The killing spot
8145 stwcx. r11,0,r11 ; Kill the reservation
8148 lwz r11,pmapCCtl(r3) ; Get the segment cache control
8149 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8150 beq++ pmapCacheLookup ; Nope...
8151 b pmapCacheLookus ; Yup, keep waiting...
8155 ; mapMergeRC -- Given a physical mapping address in R31, locate its
8156 ; connected PTE (if any) and merge the PTE referenced and changed bits
8157 ; into the mapping and physent.
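;
; Conceptually (C sketch; the bit positions and field layout are assumptions):
; pick up the R and C bits from the PTE's low word, OR them into the mapping's
; copy, and OR them into the physent atomically, since other processors may be
; merging at the same time.
;
;   #include <stdint.h>
;
;   void merge_rc(uint32_t pte_lo, uint32_t *map_word, volatile uint32_t *physent)
;   {
;       uint32_t rc = pte_lo & 0x180;            /* R and C bits (assumed)    */
;       *map_word |= rc;                         /* mapping's copy            */
;       __sync_fetch_and_or(physent, rc << 19);  /* physent copy; the shift   */
;                                                /* is an assumed position    */
;   }
;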
8163 lwz r0,mpPte(r31) ; Grab the PTE offset
8164 mfsdr1 r7 ; Get the pointer to the hash table
8165 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8166 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8167 andi. r3,r0,mpHValid ; Is there a possible PTE?
8168 srwi r7,r0,4 ; Convert to PCA units
8169 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8170 mflr r2 ; Save the return
8171 subfic r7,r7,-4 ; Convert to -4 based negative index
8172 add r7,r10,r7 ; Point to the PCA directly
8173 beqlr-- ; There was no PTE to start with...
8175 bl mapLockPteg ; Lock the PTEG
8177 lwz r0,mpPte(r31) ; Grab the PTE offset
8178 mtlr r2 ; Restore the LR
8179 andi. r3,r0,mpHValid ; Is there a possible PTE?
8180 beq- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8182 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8183 add r3,r3,r10 ; Point to actual PTE
8184 lwz r5,4(r3) ; Get the real part of the PTE
8185 srwi r10,r5,12 ; Change physical address to a ppnum
8187 mMNmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8188 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8189 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8190 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8191 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8192 add r11,r11,r8 ; Point to the bank table
8193 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8194 lwz r11,mrStart(r11) ; Get the start of bank
8195 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8196 addi r2,r2,4 ; Offset to last half of field
8197 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8198 sub r11,r10,r11 ; Get the index into the table
8199 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8201 mMmrgRC: lwarx r10,r11,r2 ; Get the master RC
8202 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8203 or r0,r0,r10 ; Merge in the new RC
8204 stwcx. r0,r11,r2 ; Try to stick it back
8205 bne-- mMmrgRC ; Try again if we collided...
8206 eieio ; Commit all updates
8209 stw r6,0(r7) ; Unlock PTEG
8213 ; 64-bit version of mapMergeRC
8218 lwz r0,mpPte(r31) ; Grab the PTE offset
8219 ld r5,mpVAddr(r31) ; Grab the virtual address
8220 mfsdr1 r7 ; Get the pointer to the hash table
8221 rldicr r10,r7,0,45 ; Clean up the hash table base
8222 andi. r3,r0,mpHValid ; Is there a possible PTE?
8223 srdi r7,r0,5 ; Convert to PCA units
8224 rldicr r7,r7,0,61 ; Clean up PCA
8225 subfic r7,r7,-4 ; Convert to -4 based negative index
8226 mflr r2 ; Save the return
8227 add r7,r10,r7 ; Point to the PCA directly
8228 beqlr-- ; There was no PTE to start with...
8230 bl mapLockPteg ; Lock the PTEG
8232 lwz r0,mpPte(r31) ; Grab the PTE offset again
8233 mtlr r2 ; Restore the LR
8234 andi. r3,r0,mpHValid ; Is there a possible PTE?
8235 beq-- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8237 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8238 add r3,r3,r10 ; Point to the actual PTE
8239 ld r5,8(r3) ; Get the real part
8240 srdi r10,r5,12 ; Change physical address to a ppnum
8241 b mMNmerge ; Join the common 32-64-bit code...
8245 ; This routine, given a mapping, will find and lock the PTEG
8246 ; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
8247 ; PTEG and return. In this case R4 is undefined, R5 contains only
8248 ; the low 12 bits of mpVAddr, and R3 will contain 0.
8250 ; If the mapping is still valid, we will invalidate the PTE and merge
8251 ; the RC bits into the physent and also save them into the mapping.
8253 ; We then return with R3 pointing to the PTE slot, R4 is the
8254 ; top of the PTE and R5 is the bottom. R6 contains the PCA.
8255 ; R7 points to the PCA entry.
8257 ; Note that we should NEVER be called on a block or special mapping.
8258 ; We could do many bad things.
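;
; The disconnect below follows the architected invalidate sequence.  A C-style
; sketch of just that sequence (the lock and TLB primitives are stand-ins, not
; functions defined in this file):
;
;   #include <stdint.h>
;
;   extern void tlbie_lock(void), tlbie_unlock(void);   /* global tlbieLock   */
;   extern void tlbie(uint64_t ea), tlbsync_all(void);  /* eieio+tlbsync+sync */
;
;   void pte_disconnect(volatile uint32_t *pte_hi, uint64_t ea, int smp)
;   {
;       *pte_hi &= ~0x80000000u;     /* clear the PTE valid bit               */
;       __sync_synchronize();        /* "sync": invalidate is visible first   */
;       tlbie_lock();                /* only one tlbie sequence at a time     */
;       tlbie(ea);                   /* invalidate the entry on all CPUs      */
;       if (smp)
;           tlbsync_all();           /* wait for everyone to catch up         */
;       tlbie_unlock();
;   }
;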
8264 lwz r0,mpPte(r31) ; Grab the PTE offset
8265 mfsdr1 r7 ; Get the pointer to the hash table
8266 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8267 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8268 andi. r3,r0,mpHValid ; Is there a possible PTE?
8269 srwi r7,r0,4 ; Convert to PCA units
8270 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8271 mflr r2 ; Save the return
8272 subfic r7,r7,-4 ; Convert to -4 based negative index
8273 add r7,r10,r7 ; Point to the PCA directly
8274 beqlr-- ; There was no PTE to start with...
8276 bl mapLockPteg ; Lock the PTEG
8278 lwz r0,mpPte(r31) ; Grab the PTE offset
8279 mtlr r2 ; Restore the LR
8280 andi. r3,r0,mpHValid ; Is there a possible PTE?
8281 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8283 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8284 add r3,r3,r10 ; Point to actual PTE
8285 lwz r4,0(r3) ; Get the top of the PTE
8287 li r8,tlbieLock ; Get the TLBIE lock
8288 rlwinm r0,r4,0,1,31 ; Clear the valid bit
8289 stw r0,0(r3) ; Invalidate the PTE
8291 sync ; Make sure everyone sees the invalidate
8293 mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
8294 mfsprg r2,2 ; Get feature flags
8295 mr. r0,r0 ; Is it locked?
8296 li r0,1 ; Get our lock word
8297 bne- mITLBIE32 ; It is locked, go wait...
8299 stwcx. r0,0,r8 ; Try to get it
8300 bne- mITLBIE32 ; We was beat...
8302 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
8303 li r0,0 ; Lock clear value
8305 tlbie r5 ; Invalidate it everywhere
8307 beq- mINoTS32 ; Can not have MP on this machine...
8309 eieio ; Make sure that the tlbie happens first
8310 tlbsync ; Wait for everyone to catch up
8311 sync ; Make sure of it all
8313 mINoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
8314 lwz r5,4(r3) ; Get the real part
8315 srwi r10,r5,12 ; Change physical address to a ppnum
8317 mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8318 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8319 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8320 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8321 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8322 add r11,r11,r8 ; Point to the bank table
8323 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8324 lwz r11,mrStart(r11) ; Get the start of bank
8325 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8326 addi r2,r2,4 ; Offset to last half of field
8327 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8328 sub r11,r10,r11 ; Get the index into the table
8329 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8332 mImrgRC: lwarx r10,r11,r2 ; Get the master RC
8333 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8334 or r0,r0,r10 ; Merge in the new RC
8335 stwcx. r0,r11,r2 ; Try to stick it back
8336 bne-- mImrgRC ; Try again if we collided...
8338 blr ; Leave with the PCA still locked up...
8340 mIPUnlock: eieio ; Make sure all updates come first
8342 stw r6,0(r7) ; Unlock
8351 lwz r0,mpPte(r31) ; Grab the PTE offset
8352 ld r5,mpVAddr(r31) ; Grab the virtual address
8353 mfsdr1 r7 ; Get the pointer to the hash table
8354 rldicr r10,r7,0,45 ; Clean up the hash table base
8355 andi. r3,r0,mpHValid ; Is there a possible PTE?
8356 srdi r7,r0,5 ; Convert to PCA units
8357 rldicr r7,r7,0,61 ; Clean up PCA
8358 subfic r7,r7,-4 ; Convert to -4 based negative index
8359 mflr r2 ; Save the return
8360 add r7,r10,r7 ; Point to the PCA directly
8361 beqlr-- ; There was no PTE to start with...
8363 bl mapLockPteg ; Lock the PTEG
8365 lwz r0,mpPte(r31) ; Grab the PTE offset again
8366 mtlr r2 ; Restore the LR
8367 andi. r3,r0,mpHValid ; Is there a possible PTE?
8368 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8370 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8371 add r3,r3,r10 ; Point to the actual PTE
8372 ld r4,0(r3) ; Get the top of the PTE
8374 li r8,tlbieLock ; Get the TLBIE lock
8375 rldicr r0,r4,0,62 ; Clear the valid bit
8376 std r0,0(r3) ; Invalidate the PTE
8378 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
8379 sync ; Make sure everyone sees the invalidate
8380 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
8382 mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
8383 mr. r0,r0 ; Is it locked?
8384 li r0,1 ; Get our lock word
8385 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
8387 stwcx. r0,0,r8 ; Try to get it
8388 bne-- mITLBIE64 ; We was beat...
8390 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
8392 li r0,0 ; Lock clear value
8394 tlbie r2 ; Invalidate it everywhere
8396 eieio ; Make sure that the tlbie happens first
8397 tlbsync ; Wait for everyone to catch up
8398 ptesync ; Wait for quiet again
8400 stw r0,tlbieLock(0) ; Clear the tlbie lock
8402 ld r5,8(r3) ; Get the real part
8403 srdi r10,r5,12 ; Change physical address to a ppnum
8404 b mINmerge ; Join the common 32-64-bit code...
8406 mITLBIE64a: li r5,lgKillResv ; Killing field
8407 stwcx. r5,0,r5 ; Kill reservation
8409 mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
8410 mr. r0,r0 ; Is it locked?
8411 beq++ mITLBIE64 ; Nope, try again...
8412 b mITLBIE64b ; Yup, wait for it...
8415 ; mapLockPteg - Locks a PTEG
8416 ; R7 points to PCA entry
8417 ; R6 contains PCA on return
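;
; This is a test-and-test-and-set spin on the PCA lock bit; while the lock is
; held we poll with plain loads rather than reservations.  Roughly, in C (a
; compiler builtin stands in for the lwarx/stwcx. pair):
;
;   #include <stdint.h>
;
;   uint32_t pca_lock(volatile uint32_t *pca, uint32_t lock_bit)
;   {
;       for (;;) {
;           uint32_t old = *pca;                  /* cheap poll while busy    */
;           if ((old & lock_bit) == 0 &&
;               __sync_bool_compare_and_swap(pca, old, old | lock_bit))
;               return old;                       /* PCA image, as loaded     */
;       }
;   }
;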
8424 lwarx r6,0,r7 ; Pick up the PCA
8425 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8426 ori r0,r6,PCAlock ; Set the lock bit
8427 bne-- mLSkill ; It is locked...
8429 stwcx. r0,0,r7 ; Try to lock the PTEG
8430 bne-- mapLockPteg ; We collided...
8432 isync ; Nostradamus lied
8435 mLSkill: li r6,lgKillResv ; Get killing field
8436 stwcx. r6,0,r6 ; Kill it
8439 lwz r6,0(r7) ; Pick up the PCA
8440 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8441 beq++ mapLockPteg ; Nope, try again...
8442 b mapLockPteh ; Yes, wait for it...
8446 ; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
8447 ; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
8448 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
8449 ; R4 returns the slot index.
8451 ; CR7 also indicates that we have a block mapping
8453 ; The PTEG allocation controls are a bit map of the state of the PTEG.
8454 ; PCAfree indicates that the PTE slot is empty.
8455 ; PCAauto means that it comes from an autogen area. These
8456 ; guys do not keep track of reference and change and are actually "wired".
8457 ; They are easy to maintain. PCAsteal
8458 ; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
8459 ; fields fit in a single word and are loaded and stored under control of the
8460 ; PTEG control area lock (PCAlock).
8462 ; Note that PCAauto does not contribute to the steal calculations at all. Originally
8463 ; it did, autogens were second in priority. This can result in a pathological
8464 ; case where an instruction cannot make forward progress, or one PTE slot thrashes.
8467 ; Note that the PCA must be locked when we get here.
8469 ; Physically, the fields are arranged:
8476 ; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
8480 ; R3 = 0 - no steal, an unoccupied slot was selected
8481 ; R3 = 1 - steal regular
8482 ; R3 = 2 - steal autogen
8483 ; R4 contains slot number
8484 ; R6 contains updated PCA image
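;
; In outline (C sketch; the PCA bit packing is abstracted away and the helper
; name is invented): take the first free slot if there is one, otherwise steal
; the slot named by the rotating steal position, and report what was displaced.
;
;   #include <stdint.h>
;
;   /* Returns 0 for an empty slot, 1 for stealing a regular PTE, 2 for an
;      autogen (block) PTE; *slot receives the chosen slot index.             */
;   int pteg_select_slot(uint8_t *free_bits, uint8_t *auto_bits,
;                        uint8_t *steal_pos, int *slot)
;   {
;       for (int i = 0; i < 8; i++)
;           if (*free_bits & (0x80 >> i)) {       /* an unoccupied slot wins  */
;               *free_bits &= (uint8_t)~(0x80 >> i);
;               *slot = i;
;               return 0;
;           }
;       int victim = *steal_pos & 7;              /* rotating victim choice   */
;       *steal_pos = (uint8_t)((victim + 1) & 7); /* march the mask along     */
;       *slot = victim;
;       return (*auto_bits & (0x80 >> victim)) ? 2 : 1;
;   }
;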
8489 mapSelSlot: lis r10,0 ; Clear autogen mask
8490 li r9,0 ; Start a mask
8491 beq cr7,mSSnotblk ; Skip if this is not a block mapping
8492 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
8494 mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
8495 oris r9,r9,0x8000 ; Get a mask
8496 cntlzw r4,r6 ; Find a slot or steal one
8497 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
8498 rlwinm r4,r4,0,29,31 ; Isolate bit position
8499 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
8500 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
8501 srwi r11,r11,1 ; Slide steal mask right
8502 and r8,r6,r2 ; Isolate the old in use and autogen bits
8503 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
8504 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
8505 and r2,r2,r10 ; Keep the autogen part if autogen
8506 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
8507 or r6,r6,r2 ; Add in the new autogen bit
8508 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
8509 rlwinm r8,r8,1,31,31 ; Isolate old in use
8510 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
8512 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
8516 ; Shared/Exclusive locks
8518 ; A shared/exclusive lock allows multiple shares of a lock to be taken
8519 ; but only one exclusive. A shared lock can be "promoted" to exclusive
8520 ; when it is the only share. If there are multiple sharers, the lock
8521 ; must be "converted". A promotion drops the share and gains exclusive as
8522 ; an atomic operation. If anyone else has a share, the operation fails.
8523 ; A conversion first drops the share and then takes an exclusive lock.
8525 ; We will want to add a timeout to this eventually.
8527 ; R3 is set to 0 for success, non-zero for failure
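;
; The lock word itself is simple: the high bit means "held exclusive", and
; otherwise the word is the count of shared holders.  A C sketch of promote
; and convert as described above (compiler builtins stand in for lwarx/stwcx.):
;
;   #include <stdint.h>
;
;   #define SXLK_EXCL 0x80000000u
;
;   /* Promote: succeeds (returns 0) only if we hold the sole share. */
;   int sxlk_promote(volatile uint32_t *lk)
;   {
;       return __sync_bool_compare_and_swap(lk, 1u, SXLK_EXCL) ? 0 : 1;
;   }
;
;   /* Convert: drop our share, then spin until we get the lock exclusively. */
;   void sxlk_convert(volatile uint32_t *lk)
;   {
;       if (__sync_bool_compare_and_swap(lk, 1u, SXLK_EXCL))
;           return;                              /* we were the only sharer   */
;       __sync_fetch_and_sub(lk, 1u);            /* drop the share            */
;       while (!__sync_bool_compare_and_swap(lk, 0u, SXLK_EXCL))
;           ;                                    /* wait for it to go free    */
;   }
;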
8531 ; Convert a share into an exclusive
8538 lis r0,0x8000 ; Get the locked lock image
8540 mflr r0 ; (TEST/DEBUG)
8541 oris r0,r0,0x8000 ; (TEST/DEBUG)
8544 sxlkCTry: lwarx r2,0,r3 ; Get the lock word
8545 cmplwi r2,1 ; Does it just have our share?
8546 subi r2,r2,1 ; Drop our share in case we do not get it
8547 bne-- sxlkCnotfree ; No, we need to unlock...
8548 stwcx. r0,0,r3 ; Try to take it exclusively
8549 bne-- sxlkCTry ; Collision, try again...
8556 stwcx. r2,0,r3 ; Try to drop our share...
8557 bne-- sxlkCTry ; Try again if we collided...
8558 b sxlkExclusive ; Go take it exclusively...
8561 ; Promote shared to exclusive
8567 lis r0,0x8000 ; Get the locked lock image
8569 mflr r0 ; (TEST/DEBUG)
8570 oris r0,r0,0x8000 ; (TEST/DEBUG)
8573 sxlkPTry: lwarx r2,0,r3 ; Get the lock word
8574 cmplwi r2,1 ; Does it just have our share?
8575 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
8576 stwcx. r0,0,r3 ; Try to take it exclusively
8577 bne-- sxlkPTry ; Collision, try again...
8583 sxlkPkill: li r2,lgKillResv ; Point to killing field
8584 stwcx. r2,0,r2 ; Kill reservation
8590 ; Take lock exclusively
8596 lis r0,0x8000 ; Get the locked lock image
8598 mflr r0 ; (TEST/DEBUG)
8599 oris r0,r0,0x8000 ; (TEST/DEBUG)
8602 sxlkXTry: lwarx r2,0,r3 ; Get the lock word
8603 mr. r2,r2 ; Is it locked?
8604 bne-- sxlkXWait ; Yes...
8605 stwcx. r0,0,r3 ; Try to take it
8606 bne-- sxlkXTry ; Collision, try again...
8608 isync ; Toss anything younger than us
8614 sxlkXWait: li r2,lgKillResv ; Point to killing field
8615 stwcx. r2,0,r2 ; Kill reservation
8617 sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
8618 mr. r2,r2 ; Is it free yet?
8619 beq++ sxlkXTry ; Yup...
8620 b sxlkXWaiu ; Hang around a bit more...
8623 ; Take a share of the lock
8628 sxlkShared: lwarx r2,0,r3 ; Get the lock word
8629 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8630 addi r2,r2,1 ; Up the share count
8631 bne-- sxlkSWait ; Yes...
8632 stwcx. r2,0,r3 ; Try to take it
8633 bne-- sxlkShared ; Collision, try again...
8635 isync ; Toss anything younger than us
8641 sxlkSWait: li r2,lgKillResv ; Point to killing field
8642 stwcx. r2,0,r2 ; Kill reservation
8644 sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
8645 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8646 beq++ sxlkShared ; Nope...
8647 b sxlkSWaiu ; Hang around a bit more...
8650 ; Unlock either exclusive or shared.
8655 sxlkUnlock: eieio ; Make sure we order our stores out
8657 sxlkUnTry: lwarx r2,0,r3 ; Get the lock
8658 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
8659 subi r2,r2,1 ; Remove our share if we have one
8660 li r0,0 ; Clear this
8661 bne-- sxlkUExclu ; We hold exclusive...
8663 stwcx. r2,0,r3 ; Try to lose our share
8664 bne-- sxlkUnTry ; Collision...
8667 sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
8668 beqlr++ ; Leave if ok...
8669 b sxlkUnTry ; Could not store, try over...
8673 .globl EXT(fillPage)
8677 mfsprg r0,2 ; Get feature flags
8678 mtcrf 0x02,r0 ; move pf64Bit to cr
8680 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
8681 lis r2,0x0200 ; Get vec
8683 ori r2,r2,0x2000 ; Get FP
8687 andc r5,r5,r2 ; Clear out permanent turn-offs
8689 ori r2,r2,0x8030 ; Clear IR, DR and EE
8691 andc r0,r5,r2 ; Kill them
8694 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
8696 slwi r3,r3,12 ; Make into a physical address
8697 mtmsr r2 ; Interrupts and translation off
8700 li r2,4096/32 ; Get number of cache lines
8702 fp32again: dcbz 0,r3 ; Clear
8703 addic. r2,r2,-1 ; Count down
8707 stw r8,12(r3) ; Fill
8708 stw r9,16(r3) ; Fill
8709 stw r10,20(r3) ; Fill
8710 stw r11,24(r3) ; Fill
8711 stw r12,28(r3) ; Fill
8712 addi r3,r3,32 ; Point next
8713 bgt+ fp32again ; Keep going
8715 mtmsr r5 ; Restore all
8722 sldi r2,r2,63 ; Get 64-bit bit
8723 or r0,r0,r2 ; Turn on 64-bit
8724 sldi r3,r3,12 ; Make into a physical address
8726 mtmsrd r0 ; Interrupts and translation off
8729 li r2,4096/128 ; Get number of cache lines
8731 fp64again: dcbz128 0,r3 ; Clear
8732 addic. r2,r2,-1 ; Count down
8735 std r7,16(r3) ; Fill
8736 std r8,24(r3) ; Fill
8737 std r9,32(r3) ; Fill
8738 std r10,40(r3) ; Fill
8739 std r11,48(r3) ; Fill
8740 std r12,56(r3) ; Fill
8741 std r4,64+0(r3) ; Fill
8742 std r6,64+8(r3) ; Fill
8743 std r7,64+16(r3) ; Fill
8744 std r8,64+24(r3) ; Fill
8745 std r9,64+32(r3) ; Fill
8746 std r10,64+40(r3) ; Fill
8747 std r11,64+48(r3) ; Fill
8748 std r12,64+56(r3) ; Fill
8749 addi r3,r3,128 ; Point next
8750 bgt+ fp64again ; Keep going
8752 mtmsrd r5 ; Restore all
8762 lis r11,hi16(EXT(mapdebug))
8763 ori r11,r11,lo16(EXT(mapdebug))
8768 mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
8783 .globl EXT(checkBogus)
8788 blr ; No-op normally