1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 #include <assym.s>
31 #include <debug.h>
32 #include <db_machine_commands.h>
33 #include <mach_rt.h>
34
35 #include <mach_debug.h>
36 #include <ppc/asm.h>
37 #include <ppc/proc_reg.h>
38 #include <ppc/exception.h>
39 #include <ppc/Performance.h>
40 #include <ppc/exception.h>
41 #include <mach/ppc/vm_param.h>
42
43 .text
44
45 ;
46 ; 0 0 1 2 3 4 4 5 6
47 ; 0 8 6 4 2 0 8 6 3
48 ; +--------+--------+--------+--------+--------+--------+--------+--------+
49 ; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
50 ; +--------+--------+--------+--------+--------+--------+--------+--------+
51 ;
52 ; 0 0 1
53 ; 0 8 6
54 ; +--------+--------+--------+
55 ; |//////BB|BBBBBBBB|BBBB////| - SID - base
56 ; +--------+--------+--------+
57 ;
58 ; 0 0 1
59 ; 0 8 6
60 ; +--------+--------+--------+
61 ; |////////|11111111|111111//| - SID - copy 1
62 ; +--------+--------+--------+
63 ;
64 ; 0 0 1
65 ; 0 8 6
66 ; +--------+--------+--------+
67 ; |////////|//222222|22222222| - SID - copy 2
68 ; +--------+--------+--------+
69 ;
70 ; 0 0 1
71 ; 0 8 6
72 ; +--------+--------+--------+
73 ; |//////33|33333333|33//////| - SID - copy 3 - not needed
74 ; +--------+--------+--------+ for 65 bit VPN
75 ;
76 ; 0 0 1 2 3 4 4 5 5
77 ; 0 8 6 4 2 0 8 1 5
78 ; +--------+--------+--------+--------+--------+--------+--------+
79 ; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
80 ; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed
81 ; 0 0 1 2 3 4 4 5 5
82 ; 0 8 6 4 2 0 8 1 5
83 ; +--------+--------+--------+--------+--------+--------+--------+
84 ; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
85 ; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment"
86 ; part of EA to make
87 ; room for SID base
88 ;
89 ;
90 ; 0 0 1 2 3 4 4 5 5
91 ; 0 8 6 4 2 0 8 1 5
92 ; +--------+--------+--------+--------+--------+--------+--------+
93 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
94 ; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA
95 ;
96 ; 0 0 1 2 3 4 4 5 6 7 7
97 ; 0 8 6 4 2 0 8 6 4 2 9
98 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
99 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
100 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
101 ;
102
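/*
 *	Illustrative sketch only (not part of the build): the diagrams above boil
 *	down to the following recipe, shown here as hedged C pseudocode.  The exact
 *	shift amounts and field widths are schematic; the diagrams and the code
 *	below are authoritative.
 *
 *		sid_hash = (sid << BASE_POS) | (sid << COPY1_POS) | (sid << COPY2_POS);
 *							// OR together the shifted SID copies
 *		vsid     = sid_hash ^ (segment(ea) << SEG_POS);
 *							// XOR with the left-shifted "segment" part of the EA
 *		vpn      = (vsid << 28) | (ea & 0xFFFFFFF);
 *							// lay the VSID over the low-order EA bits
 *							//   (page index + byte offset), as drawn above
 *
 *	Here segment(ea) is the high-order (non-page, non-offset) part of the EA,
 *	and BASE_POS, COPY1_POS, COPY2_POS and SEG_POS are stand-ins for the bit
 *	positions drawn above.
 */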
103
104 /* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
105 *
106 * Maps a page or block into a pmap
107 *
108 * Returns 0 if add worked or the vaddr of the first overlap if not
109 *
110 * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
111 *
112 * 1) bump mapping busy count
113 * 2) lock pmap share
114 * 3) find mapping full path - finds all possible list previous elements
115 * 4) upgrade pmap to exclusive
116 * 5) add mapping to search list
117 * 6) find physent
118 * 7) lock physent
119 * 8) add to physent
120 * 9) unlock physent
121 * 10) unlock pmap
122 * 11) drop mapping busy count
123 *
124 *
125 * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
126 *
127 * 1) bump mapping busy count
128 * 2) lock pmap share
129 * 3) find mapping full path - finds all possible list previous elements
130 * 4) upgrade pmap to exclusive
131 * 5) add mapping to search list
132 * 6) unlock pmap
133 * 7) drop mapping busy count
134 *
135 */
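/*
 *	Pseudocode sketch of the sequence above (illustrative only: the helper
 *	names are stand-ins for the assembly routines in this file, a non-zero
 *	return means failure, and error paths are elided):
 *
 *		addr64_t hw_add_map(pmap_t pmap, mapping_t *mp)
 *		{
 *			mapping_busy_bump(mp);			// 1) bump mapping busy count
 *			sxlk_shared(&pmap->pmapSXlk);		// 2) lock pmap share
 *		rescan:
 *			if (map_search_full(pmap, mp))		// 3) full search, check for overlap
 *				return overlap_vaddr;		//    collision: hand back its vaddr
 *			if (sxlk_promote(&pmap->pmapSXlk)) {	// 4) upgrade pmap to exclusive...
 *				sxlk_convert(&pmap->pmapSXlk);	//    ...or drop shared, wait for
 *				goto rescan;			//    exclusive, and redo the lookup
 *			}
 *			map_insert(pmap, mp);			// 5) add mapping to search list
 *			if (mapping_is_normal(mp)) {		// block/I/O mappings skip the physent
 *				physent_find_lock(mp);		// 6-7) find and lock the physent
 *				physent_chain_add(mp);		// 8) add to the physent chain
 *				physent_unlock(mp);		// 9) unlock physent
 *			}
 *			sxlk_unlock(&pmap->pmapSXlk);		// 10) unlock pmap
 *			mapping_busy_drop(mp);			// 11) drop mapping busy count
 *			return 0;				// success
 *		}
 */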
136
137 .align 5
138 .globl EXT(hw_add_map)
139
140 LEXT(hw_add_map)
141
142 stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
143 mflr r0 ; Save the link register
144 stw r17,FM_ARG0+0x00(r1) ; Save a register
145 stw r18,FM_ARG0+0x04(r1) ; Save a register
146 stw r19,FM_ARG0+0x08(r1) ; Save a register
147 mfsprg r19,2 ; Get feature flags
148 stw r20,FM_ARG0+0x0C(r1) ; Save a register
149 stw r21,FM_ARG0+0x10(r1) ; Save a register
150 mtcrf 0x02,r19 ; move pf64Bit cr6
151 stw r22,FM_ARG0+0x14(r1) ; Save a register
152 stw r23,FM_ARG0+0x18(r1) ; Save a register
153 stw r24,FM_ARG0+0x1C(r1) ; Save a register
154 stw r25,FM_ARG0+0x20(r1) ; Save a register
155 stw r26,FM_ARG0+0x24(r1) ; Save a register
156 stw r27,FM_ARG0+0x28(r1) ; Save a register
157 stw r28,FM_ARG0+0x2C(r1) ; Save a register
158 stw r29,FM_ARG0+0x30(r1) ; Save a register
159 stw r30,FM_ARG0+0x34(r1) ; Save a register
160 stw r31,FM_ARG0+0x38(r1) ; Save a register
161 stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
162
163 #if DEBUG
164 lwz r11,pmapFlags(r3) ; Get pmaps flags
165 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
166 bne hamPanic ; Call not valid for guest shadow assist pmap
167 #endif
168
169 rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
170 mr r28,r3 ; Save the pmap
171 mr r31,r4 ; Save the mapping
172 bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
173 lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
174 lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping
175
176 b hamSF1x ; Done...
177
178 hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
179 ld r21,mbvrswap(r11) ; Get conversion mask for mapping
180
181 hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
182
183 mr r17,r11 ; Save the MSR
184 xor r28,r28,r20 ; Convert the pmap to physical addressing
185 xor r31,r31,r21 ; Convert the mapping to physical addressing
186
187 la r3,pmapSXlk(r28) ; Point to the pmap search lock
188 bl sxlkShared ; Go get a shared lock on the mapping lists
189 mr. r3,r3 ; Did we get the lock?
190 lwz r24,mpFlags(r31) ; Pick up the flags
191 bne-- hamBadLock ; Nope...
192
193 li r21,0 ; Remember that we have the shared lock
194
195 ;
196 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
197 ; here so that we will know the previous elements so we can dequeue them
198 ; later.
199 ;
200
201 hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
202 lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
203 mr r3,r28 ; Pass in pmap to search
204 lhz r23,mpBSize(r31) ; Get the block size for later
205 mr r29,r4 ; Save top half of vaddr for later
206 mr r30,r5 ; Save bottom half of vaddr for later
207
208 bl EXT(mapSearchFull) ; Go see if we can find it
209
210 li r22,lo16(0x800C) ; Get 0xFFFF800C
211 rlwinm r0,r24,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
212 addi r23,r23,1 ; Get actual length
213 rlwnm r22,r22,r0,27,31 ; Rotate to get 12 or 25
214 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
215 slw r9,r23,r22 ; Isolate the low part
216 rlwnm r22,r23,r22,22,31 ; Extract the high order
217 addic r23,r9,-4096 ; Get the length to the last page
218 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
219 addme r22,r22 ; Do high order as well...
220 mr. r3,r3 ; Did we find a mapping here?
221 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
222 bne-- hamOverlay ; We found a mapping, this is no good, can not double map...
223
224 addc r9,r0,r23 ; Add size to get last page in new range
225 or. r0,r4,r5 ; Are we beyond the end?
226 adde r8,r29,r22 ; Add the rest of the length on
227 rlwinm r9,r9,0,0,31 ; Clean top half of sum
228 beq++ hamFits ; We are at the end...
229
230 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
231 cmplw r8,r4 ; Is our end before the next (top part)
232 crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
233 cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less
234
235 bf-- cr0_eq,hamOverlay ; No, we do fit, there is an overlay...
236
237 ;
238 ; Here we try to convert to an exclusive lock. This will fail if someone else
239 ; has it shared.
240 ;
241 hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
242 la r3,pmapSXlk(r28) ; Point to the pmap search lock
243
244 bne-- hamGotX ; We already have the exclusive...
245
246 bl sxlkPromote ; Try to promote shared to exclusive
247 mr. r3,r3 ; Could we?
248 beq++ hamGotX ; Yeah...
249
250 ;
 251 ; Since we could not promote our lock, we need to convert it instead.
252 ; That means that we drop the shared lock and wait to get it
253 ; exclusive. Since we release the lock, we need to do the look up
254 ; again.
255 ;
256
257 la r3,pmapSXlk(r28) ; Point to the pmap search lock
258 bl sxlkConvert ; Convert shared to exclusive
259 mr. r3,r3 ; Could we?
260 bne-- hamBadLock ; Nope, we must have timed out...
261
262 li r21,1 ; Remember that we have the exclusive lock
263 b hamRescan ; Go look again...
264
265 .align 5
266
267 hamGotX: mr r3,r28 ; Get the pmap to insert into
268 mr r4,r31 ; Point to the mapping
269 bl EXT(mapInsert) ; Insert the mapping into the list
270
271 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
272 lhz r8,mpSpace(r31) ; Get the address space
273 lwz r11,lgpPcfg(r11) ; Get the page config
274 mfsdr1 r7 ; Get the hash table base/bounds
275 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
276
277 andi. r0,r24,mpType ; Is this a normal mapping?
278
279 rlwimi r8,r8,14,4,17 ; Double address space
280 rlwinm r9,r30,0,4,31 ; Clear segment
281 rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
282 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
283 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
284 rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
285 addi r4,r4,1 ; Bump up the mapped page count
286 srw r9,r9,r11 ; Isolate just the page index
287 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
288 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
289 xor r9,r9,r10 ; Get the hash to the PTEG
290
291 bne-- hamDoneNP ; Not a normal mapping, therefore, no physent...
292
293 bl mapPhysFindLock ; Go find and lock the physent
294
295 bt++ pf64Bitb,ham64 ; This is 64-bit...
296
297 lwz r11,ppLink+4(r3) ; Get the alias chain pointer
298 rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
299 slwi r9,r9,6 ; Make PTEG offset
300 ori r7,r7,0xFFC0 ; Stick in the bottom part
301 rlwinm r12,r11,0,~ppFlags ; Clean it up
302 and r9,r9,r7 ; Wrap offset into table
303 mr r4,r31 ; Set the link to install
304 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
305 stw r12,mpAlias+4(r31) ; Move to the mapping
306 bl mapPhyCSet32 ; Install the link
307 b hamDone ; Go finish up...
308
309 .align 5
310
311 ham64: li r0,ppLFAmask ; Get mask to clean up alias pointer
312 subfic r7,r7,46 ; Get number of leading zeros
313 eqv r4,r4,r4 ; Get all ones
314 ld r11,ppLink(r3) ; Get the alias chain pointer
 315 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
316 srd r4,r4,r7 ; Get the wrap mask
317 sldi r9,r9,7 ; Change hash to PTEG offset
318 andc r11,r11,r0 ; Clean out the lock and flags
319 and r9,r9,r4 ; Wrap to PTEG
320 mr r4,r31
321 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
322 std r11,mpAlias(r31) ; Set the alias pointer in the mapping
323
324 bl mapPhyCSet64 ; Install the link
325
326 hamDone: bl mapPhysUnlock ; Unlock the physent chain
327
328 hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
329 bl sxlkUnlock ; Unlock the search list
330
331 mr r3,r31 ; Get the mapping pointer
332 bl mapDropBusy ; Drop the busy count
333
334 li r3,0 ; Set successful return
335 li r4,0 ; Set successful return
336
337 hamReturn: bt++ pf64Bitb,hamR64 ; Yes...
338
339 mtmsr r17 ; Restore enables/translation/etc.
340 isync
341 b hamReturnC ; Join common...
342
343 hamR64: mtmsrd r17 ; Restore enables/translation/etc.
344 isync
345
346 hamReturnC: lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
 347 lwz r17,FM_ARG0+0x00(r1) ; Restore a register
 348 lwz r18,FM_ARG0+0x04(r1) ; Restore a register
 349 lwz r19,FM_ARG0+0x08(r1) ; Restore a register
 350 lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
 351 mtlr r0 ; Restore the return
 352 lwz r21,FM_ARG0+0x10(r1) ; Restore a register
 353 lwz r22,FM_ARG0+0x14(r1) ; Restore a register
 354 lwz r23,FM_ARG0+0x18(r1) ; Restore a register
 355 lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
 356 lwz r25,FM_ARG0+0x20(r1) ; Restore a register
 357 lwz r26,FM_ARG0+0x24(r1) ; Restore a register
 358 lwz r27,FM_ARG0+0x28(r1) ; Restore a register
 359 lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
 360 lwz r29,FM_ARG0+0x30(r1) ; Restore a register
 361 lwz r30,FM_ARG0+0x34(r1) ; Restore a register
 362 lwz r31,FM_ARG0+0x38(r1) ; Restore a register
363 lwz r1,0(r1) ; Pop the stack
364
365 blr ; Leave...
366
367
368 .align 5
369
370 hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
371 li r0,mpC|mpR ; Get a mask to turn off RC bits
372 lwz r23,mpFlags(r31) ; Get the requested flags
373 lwz r20,mpVAddr(r3) ; Get the overlay address
374 lwz r8,mpVAddr(r31) ; Get the requested address
375 lwz r21,mpVAddr+4(r3) ; Get the overlay address
376 lwz r9,mpVAddr+4(r31) ; Get the requested address
377 lhz r10,mpBSize(r3) ; Get the overlay length
378 lhz r11,mpBSize(r31) ; Get the requested length
379 lwz r24,mpPAddr(r3) ; Get the overlay physical address
380 lwz r25,mpPAddr(r31) ; Get the requested physical address
381 andc r21,r21,r0 ; Clear RC bits
382 andc r9,r9,r0 ; Clear RC bits
383
384 la r3,pmapSXlk(r28) ; Point to the pmap search lock
385 bl sxlkUnlock ; Unlock the search list
386
387 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
388 mr r3,r20 ; Save the top of the colliding address
389 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
390
 391 bne++ hamRemv ; Removing, go say so, so we can help...
392
393 cmplw r20,r8 ; High part of vaddr the same?
394 cmplw cr1,r21,r9 ; Low part?
395 crand cr5_eq,cr0_eq,cr1_eq ; Remember if same
396
397 cmplw r10,r11 ; Size the same?
398 cmplw cr1,r24,r25 ; Physical address?
399 crand cr5_eq,cr5_eq,cr0_eq ; Remember
400 crand cr5_eq,cr5_eq,cr1_eq ; Remember if same
401
402 xor r23,r23,r22 ; Compare mapping flag words
403 andi. r23,r23,mpType|mpPerm ; Are mapping types and attributes the same?
404 crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
405 bf-- cr5_eq,hamSmash ; This is not the same, so we return a smash...
406
407 ori r4,r4,mapRtMapDup ; Set duplicate
408 b hamReturn ; And leave...
409
410 hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
411 b hamReturn ; Come back yall...
412
413 hamSmash: ori r4,r4,mapRtSmash ; Tell caller that it has some clean up to do
414 b hamReturn ; Join common epilog code
415
416 .align 5
417
418 hamBadLock: li r3,0 ; Set lock time out error code
419 li r4,mapRtBadLk ; Set lock time out error code
420 b hamReturn ; Leave....
421
422 hamPanic: lis r0,hi16(Choke) ; System abend
423 ori r0,r0,lo16(Choke) ; System abend
424 li r3,failMapping ; Show that we failed some kind of mapping thing
425 sc
426
427
428
429
430 /*
431 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
432 *
433 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
434 * a 64-bit quantity, it is a long long so it is in R4 and R5.
435 *
 436 * We return the virtual address of the removed mapping in
 437 * R3.
438 *
439 * Note that this is designed to be called from 32-bit mode with a stack.
440 *
 441 * We disable translation and all interruptions here. This keeps us
442 * from having to worry about a deadlock due to having anything locked
443 * and needing it to process a fault.
444 *
445 * Note that this must be done with both interruptions off and VM off
446 *
447 * Remove mapping via pmap, regular page, no pte
448 *
449 * 1) lock pmap share
450 * 2) find mapping full path - finds all possible list previous elements
 451 * 3) upgrade pmap to exclusive
 452 * 4) bump mapping busy count
453 * 5) remove mapping from search list
454 * 6) unlock pmap
455 * 7) lock physent
456 * 8) remove from physent
457 * 9) unlock physent
458 * 10) drop mapping busy count
459 * 11) drain mapping busy count
460 *
461 *
462 * Remove mapping via pmap, regular page, with pte
463 *
464 * 1) lock pmap share
465 * 2) find mapping full path - finds all possible list previous elements
466 * 3) upgrade lock to exclusive
467 * 4) bump mapping busy count
468 * 5) lock PTEG
469 * 6) invalidate pte and tlbie
470 * 7) atomic merge rc into physent
471 * 8) unlock PTEG
472 * 9) remove mapping from search list
473 * 10) unlock pmap
474 * 11) lock physent
475 * 12) remove from physent
476 * 13) unlock physent
477 * 14) drop mapping busy count
478 * 15) drain mapping busy count
479 *
480 *
481 * Remove mapping via pmap, I/O or block
482 *
483 * 1) lock pmap share
484 * 2) find mapping full path - finds all possible list previous elements
485 * 3) upgrade lock to exclusive
486 * 4) bump mapping busy count
487 * 5) mark remove-in-progress
488 * 6) check and bump remove chunk cursor if needed
489 * 7) unlock pmap
490 * 8) if something to invalidate, go to step 11
491
492 * 9) drop busy
493 * 10) return with mapRtRemove to force higher level to call again
494
495 * 11) Lock PTEG
496 * 12) invalidate ptes, no tlbie
497 * 13) unlock PTEG
498 * 14) repeat 11 - 13 for all pages in chunk
499 * 15) if not final chunk, go to step 9
500 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
501 * 17) lock pmap share
502 * 18) find mapping full path - finds all possible list previous elements
503 * 19) upgrade lock to exclusive
504 * 20) remove mapping from search list
505 * 21) drop mapping busy count
506 * 22) drain mapping busy count
507 *
508 */
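/*
 *	Illustrative caller-side sketch (not part of the build; names other than
 *	hw_rem_map and the mapRt* codes are stand-ins): because block and I/O
 *	mappings are invalidated a chunk at a time, hw_rem_map may return the
 *	mapRtRemove code to say "not finished, call me again".  A higher-level
 *	remove loop therefore behaves roughly like this:
 *
 *		do {
 *			rc = hw_rem_map(pmap, vaddr, &next);	// remove, or continue removing
 *		} while (rc == mapRtRemove);			// repeat until the chunked
 *								//   invalidation reaches the end
 *		if (rc == mapRtBadLk)				// search lock timed out
 *			panic("...");
 *
 *	Only the final pass dequeues the mapping from the search list and drains
 *	its busy count.
 */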
509
510 .align 5
511 .globl EXT(hw_rem_map)
512
513 LEXT(hw_rem_map)
514
515 ;
516 ; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
517 ; THE HW_PURGE_* ROUTINES ALSO
518 ;
519
520 #define hrmStackSize ((31-15+1)*4)+4
521 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
522 mflr r0 ; Save the link register
523 stw r15,FM_ARG0+0x00(r1) ; Save a register
524 stw r16,FM_ARG0+0x04(r1) ; Save a register
525 stw r17,FM_ARG0+0x08(r1) ; Save a register
526 stw r18,FM_ARG0+0x0C(r1) ; Save a register
527 stw r19,FM_ARG0+0x10(r1) ; Save a register
528 mfsprg r19,2 ; Get feature flags
529 stw r20,FM_ARG0+0x14(r1) ; Save a register
530 stw r21,FM_ARG0+0x18(r1) ; Save a register
531 mtcrf 0x02,r19 ; move pf64Bit cr6
532 stw r22,FM_ARG0+0x1C(r1) ; Save a register
533 stw r23,FM_ARG0+0x20(r1) ; Save a register
534 stw r24,FM_ARG0+0x24(r1) ; Save a register
535 stw r25,FM_ARG0+0x28(r1) ; Save a register
536 stw r26,FM_ARG0+0x2C(r1) ; Save a register
537 stw r27,FM_ARG0+0x30(r1) ; Save a register
538 stw r28,FM_ARG0+0x34(r1) ; Save a register
539 stw r29,FM_ARG0+0x38(r1) ; Save a register
540 stw r30,FM_ARG0+0x3C(r1) ; Save a register
541 stw r31,FM_ARG0+0x40(r1) ; Save a register
542 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
543 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
544
545 #if DEBUG
546 lwz r11,pmapFlags(r3) ; Get pmaps flags
547 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
548 bne hrmPanic ; Call not valid for guest shadow assist pmap
549 #endif
550
551 bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
552 lwz r9,pmapvr+4(r3) ; Get conversion mask
553 b hrmSF1x ; Done...
554
555 hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask
556
557 hrmSF1x:
558 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
559
560 xor r28,r3,r9 ; Convert the pmap to physical addressing
561
562 ;
563 ; Here is where we join in from the hw_purge_* routines
564 ;
565
566 hrmJoin: lwz r3,pmapFlags(r28) ; Get pmap's flags
567 mfsprg r19,2 ; Get feature flags again (for alternate entries)
568
569 mr r17,r11 ; Save the MSR
570 mr r29,r4 ; Top half of vaddr
571 mr r30,r5 ; Bottom half of vaddr
572
573 rlwinm. r3,r3,0,pmapVMgsaa ; Is guest shadow assist active?
574 bne-- hrmGuest ; Yes, handle specially
575
576 la r3,pmapSXlk(r28) ; Point to the pmap search lock
577 bl sxlkShared ; Go get a shared lock on the mapping lists
578 mr. r3,r3 ; Did we get the lock?
579 bne-- hrmBadLock ; Nope...
580
581 ;
582 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
583 ; here so that we will know the previous elements so we can dequeue them
584 ; later. Note: we get back mpFlags in R7.
585 ;
586
587 mr r3,r28 ; Pass in pmap to search
588 mr r4,r29 ; High order of address
589 mr r5,r30 ; Low order of address
590 bl EXT(mapSearchFull) ; Go see if we can find it
591
592 andi. r0,r7,mpPerm ; Mapping marked permanent?
593 crmove cr5_eq,cr0_eq ; Remember permanent marking
594 mr r20,r7 ; Remember mpFlags
595 mr. r31,r3 ; Did we? (And remember mapping address for later)
596 mr r15,r4 ; Save top of next vaddr
597 mr r16,r5 ; Save bottom of next vaddr
598 beq-- hrmNotFound ; Nope, not found...
599
600 bf-- cr5_eq,hrmPerm ; This one can't be removed...
601 ;
602 ; Here we try to promote to an exclusive lock. This will fail if someone else
603 ; has it shared.
604 ;
605
606 la r3,pmapSXlk(r28) ; Point to the pmap search lock
607 bl sxlkPromote ; Try to promote shared to exclusive
608 mr. r3,r3 ; Could we?
609 beq++ hrmGotX ; Yeah...
610
611 ;
 612 ; Since we could not promote our lock, we need to convert it instead.
613 ; That means that we drop the shared lock and wait to get it
614 ; exclusive. Since we release the lock, we need to do the look up
615 ; again.
616 ;
617
618 la r3,pmapSXlk(r28) ; Point to the pmap search lock
619 bl sxlkConvert ; Convert shared to exclusive
620 mr. r3,r3 ; Could we?
621 bne-- hrmBadLock ; Nope, we must have timed out...
622
623 mr r3,r28 ; Pass in pmap to search
624 mr r4,r29 ; High order of address
625 mr r5,r30 ; Low order of address
626 bl EXT(mapSearchFull) ; Rescan the list
627
628 andi. r0,r7,mpPerm ; Mapping marked permanent?
629 crmove cr5_eq,cr0_eq ; Remember permanent marking
630 mr. r31,r3 ; Did we lose it when we converted?
631 mr r20,r7 ; Remember mpFlags
632 mr r15,r4 ; Save top of next vaddr
633 mr r16,r5 ; Save bottom of next vaddr
634 beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...
635
636 bf-- cr5_eq,hrmPerm ; This one can't be removed...
637
638 ;
639 ; We have an exclusive lock on the mapping chain. And we
640 ; also have the busy count bumped in the mapping so it can
641 ; not vanish on us.
642 ;
643
644 hrmGotX: mr r3,r31 ; Get the mapping
645 bl mapBumpBusy ; Bump up the busy count
646
647 ;
648 ; Invalidate any PTEs associated with this
649 ; mapping (more than one if a block) and accumulate the reference
650 ; and change bits.
651 ;
652 ; Here is also where we need to split 32- and 64-bit processing
653 ;
654
655 lwz r21,mpPte(r31) ; Grab the offset to the PTE
656 rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
657 mfsdr1 r29 ; Get the hash table base and size
658
659 rlwinm r0,r20,0,mpType ; Isolate mapping type
660 cmplwi cr5,r0,mpBlock ; Remember whether this is a block mapping
661 cmplwi r0,mpMinSpecial ; cr0_lt <- not a special mapping type
662
663 rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
664 ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
665 cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
666 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
667 crorc cr0_eq,cr1_eq,cr0_lt ; No need to look at PTE if none or a special mapping
668 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
669 andc r29,r29,r2 ; Clean up hash table base
670 li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
671 mr r30,r23 ; Move the now merged vaddr to the correct register
672 add r26,r29,r21 ; Point to the PTEG slot
673
674 bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...
675
676 rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
677 beq- cr5,hrmBlock32 ; Go treat block specially...
678 subfic r9,r9,-4 ; Get the PCA entry offset
679 bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
680 add r7,r9,r29 ; Point to the PCA slot
681
682 bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)
683
684 lwz r21,mpPte(r31) ; Get the quick pointer again
685 lwz r5,0(r26) ; Get the top of PTE
686
687 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
688 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
689 rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
690 stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
691 beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...
692
693 stw r5,0(r26) ; Invalidate the PTE
694
695 li r9,tlbieLock ; Get the TLBIE lock
696
697 sync ; Make sure the invalid PTE is actually in memory
698
699 hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
700 mr. r5,r5 ; Is it locked?
701 li r5,1 ; Get locked indicator
702 bne- hrmPtlb32 ; It is locked, go spin...
703 stwcx. r5,0,r9 ; Try to get it
704 bne- hrmPtlb32 ; We was beat...
705
706 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
707
 708 tlbie r30 ; Invalidate all corresponding TLB entries
709
710 beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....
711
712 eieio ; Make sure that the tlbie happens first
713 tlbsync ; Wait for everyone to catch up
714 sync ; Make sure of it all
715
716 hrmNTlbs: li r0,0 ; Clear this
717 rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
718 stw r0,tlbieLock(0) ; Clear the tlbie lock
719 lis r0,0x8000 ; Get bit for slot 0
 720 eieio ; Make sure those RC bits have been stashed in the PTE
721
722 srw r0,r0,r2 ; Get the allocation hash mask
723 lwz r22,4(r26) ; Get the latest reference and change bits
724 or r6,r6,r0 ; Show that this slot is free
725
726 hrmUlckPCA32:
727 eieio ; Make sure all updates come first
728 stw r6,0(r7) ; Unlock the PTEG
729
730 ;
731 ; Now, it is time to remove the mapping and unlock the chain.
732 ; But first, we need to make sure no one else is using this
733 ; mapping so we drain the busy now
734 ;
735
736 hrmPysDQ32: mr r3,r31 ; Point to the mapping
737 bl mapDrainBusy ; Go wait until mapping is unused
738
739 mr r3,r28 ; Get the pmap to remove from
740 mr r4,r31 ; Point to the mapping
741 bl EXT(mapRemove) ; Remove the mapping from the list
742
743 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
744 rlwinm r0,r20,0,mpType ; Isolate mapping type
745 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
746 la r3,pmapSXlk(r28) ; Point to the pmap search lock
747 subi r4,r4,1 ; Drop down the mapped page count
748 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
749 bl sxlkUnlock ; Unlock the search list
750
751 bf-- cr1_lt,hrmRetn32 ; This one has no real memory associated with it so we are done...
752
753 bl mapPhysFindLock ; Go find and lock the physent
754
755 lwz r9,ppLink+4(r3) ; Get first mapping
756
757 mr r4,r22 ; Get the RC bits we just got
758 bl mapPhysMerge ; Go merge the RC bits
759
760 rlwinm r9,r9,0,~ppFlags ; Clear the flags from the mapping pointer
761
762 cmplw r9,r31 ; Are we the first on the list?
763 bne- hrmNot1st ; Nope...
764
765 li r9,0 ; Get a 0
766 lwz r4,mpAlias+4(r31) ; Get our new forward pointer
767 stw r9,mpAlias+4(r31) ; Make sure we are off the chain
768 bl mapPhyCSet32 ; Go set the physent link and preserve flags
769
770 b hrmPhyDQd ; Join up and unlock it all...
771
772 .align 5
773
774 hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
775 and r8,r8,r31 ; Get back to a page
776 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
777
778 la r3,pmapSXlk(r28) ; Point to the pmap search lock
779 bl sxlkUnlock ; Unlock the search list
780
781 xor r3,r31,r8 ; Flip mapping address to virtual
782 ori r3,r3,mapRtPerm ; Set permanent mapping error
783 b hrmErRtn
784
785 hrmBadLock: li r3,mapRtBadLk ; Set bad lock
786 b hrmErRtn
787
788 hrmEndInSight:
789 la r3,pmapSXlk(r28) ; Point to the pmap search lock
790 bl sxlkUnlock ; Unlock the search list
791
792 hrmDoneChunk:
793 mr r3,r31 ; Point to the mapping
794 bl mapDropBusy ; Drop the busy here since we need to come back
795 li r3,mapRtRemove ; Say we are still removing this
796 b hrmErRtn
797
798 .align 5
799
800 hrmNotFound:
801 la r3,pmapSXlk(r28) ; Point to the pmap search lock
802 bl sxlkUnlock ; Unlock the search list
803 li r3,mapRtNotFnd ; No mapping found
804
805 hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)
806
807 mtmsr r17 ; Restore enables/translation/etc.
808 isync
809 b hrmRetnCmn ; Join the common return code...
810
811 hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
812 isync
813 b hrmRetnCmn ; Join the common return code...
814
815 .align 5
816
817 hrmNot1st: mr. r8,r9 ; Remember and test current node
818 beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
819 lwz r9,mpAlias+4(r9) ; Chain to the next
820 cmplw r9,r31 ; Is this us?
821 bne- hrmNot1st ; Not us...
822
823 lwz r9,mpAlias+4(r9) ; Get our forward pointer
824 stw r9,mpAlias+4(r8) ; Unchain us
825
826 nop ; For alignment
827
828 hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain
829
830 hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
831 mr r3,r31 ; Copy the pointer to the mapping
832 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
833 bl mapDrainBusy ; Go wait until mapping is unused
834
835 xor r3,r31,r8 ; Flip mapping address to virtual
836
837 mtmsr r17 ; Restore enables/translation/etc.
838 isync
839
840 hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
841 lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
842 lwz r17,FM_ARG0+0x08(r1) ; Restore a register
843 lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
844 mr. r6,r6 ; Should we pass back the "next" vaddr?
845 lwz r19,FM_ARG0+0x10(r1) ; Restore a register
846 lwz r20,FM_ARG0+0x14(r1) ; Restore a register
847 mtlr r0 ; Restore the return
848
849 rlwinm r16,r16,0,0,19 ; Clean to a page boundary
850 beq hrmNoNextAdr ; Do not pass back the next vaddr...
851 stw r15,0(r6) ; Pass back the top of the next vaddr
852 stw r16,4(r6) ; Pass back the bottom of the next vaddr
853
854 hrmNoNextAdr:
855 lwz r15,FM_ARG0+0x00(r1) ; Restore a register
856 lwz r16,FM_ARG0+0x04(r1) ; Restore a register
857 lwz r21,FM_ARG0+0x18(r1) ; Restore a register
858 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
859 lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
860 lwz r23,FM_ARG0+0x20(r1) ; Restore a register
861 lwz r24,FM_ARG0+0x24(r1) ; Restore a register
862 lwz r25,FM_ARG0+0x28(r1) ; Restore a register
863 lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
864 lwz r27,FM_ARG0+0x30(r1) ; Restore a register
865 lwz r28,FM_ARG0+0x34(r1) ; Restore a register
866 lwz r29,FM_ARG0+0x38(r1) ; Restore a register
867 lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
868 lwz r31,FM_ARG0+0x40(r1) ; Restore a register
869 lwz r1,0(r1) ; Pop the stack
870 blr ; Leave...
871
872 ;
873 ; Here is where we come when all is lost. Somehow, we failed a mapping function
874 ; that must work... All hope is gone. Alas, we die.......
875 ;
876
877 hrmPanic: lis r0,hi16(Choke) ; System abend
878 ori r0,r0,lo16(Choke) ; System abend
879 li r3,failMapping ; Show that we failed some kind of mapping thing
880 sc
881
882
883 ;
884 ; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
885 ; in the range. Then, if we did not finish, return a code indicating that we need to
886 ; be called again. Eventually, we will finish and then, we will do a TLBIE for each
887 ; PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
888 ;
889 ; A potential speed up is that we stop the invalidate loop once we have walked through
890 ; the hash table once. This really is not worth the trouble because we need to have
891 ; mapped 1/2 of physical RAM in an individual block. Way unlikely.
892 ;
893 ; We should rethink this and see if we think it will be faster to check PTE and
894 ; only invalidate the specific PTE rather than all block map PTEs in the PTEG.
895 ;
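;
; Illustrative note (C sketch inside this comment, not part of the build):
; the chunk and TLBIE bookkeeping below bounds its work with a branchless
; minimum.  Both the "pages to do this pass" and the "tlbie count" values are
; clamped roughly like this, where limit is, e.g., mapRemChunk-1 or one less
; than the TLB size:
;
;     int32_t m = (n - limit) >> 31;            // arithmetic shift: -1 if n < limit, else 0
;     clamped   = ((n - limit) & m) + limit;    // == min(n, limit)
;
; This is what the addi/srawi/and/addi and subi/srawi/andc/and/or sequences
; below compute without taking a branch.
;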
896
897 .align 5
898
899 hrmBlock32: lis r29,0xD000 ; Get shift to 32MB bsu
900 rlwinm r24,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
901 lhz r25,mpBSize(r31) ; Get the number of pages in block
902 lhz r23,mpSpace(r31) ; Get the address space hash
903 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
904 rlwnm r29,r29,r24,28,31 ; Rotate to get 0 or 13
905 addi r25,r25,1 ; Account for zero-based counting
906 ori r0,r20,mpRIP ; Turn on the remove in progress flag
907 slw r25,r25,r29 ; Adjust for 32MB if needed
908 mfsdr1 r29 ; Get the hash table base and size
909 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
910 subi r25,r25,1 ; Convert back to zero-based counting
911 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
912 sub r4,r25,r9 ; Get number of pages left
913 cmplw cr1,r9,r25 ; Have we already hit the end?
914 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
915 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
916 rlwinm r26,r29,16,7,15 ; Get the hash table size
917 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
918 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
919 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
920 cmpwi cr7,r2,0 ; Remember if we have finished
921 slwi r0,r9,12 ; Make cursor into page offset
922 or r24,r24,r23 ; Get full hash
923 and r4,r4,r2 ; If more than a chunk, bring this back to 0
924 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
925 add r27,r27,r0 ; Adjust vaddr to start of current chunk
926 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
927
928 bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...
929
930 la r3,pmapSXlk(r28) ; Point to the pmap search lock
931 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
932 bl sxlkUnlock ; Unlock the search list while we are invalidating
933
934 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
935 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
936 xor r24,r24,r8 ; Get the proper VSID
937 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
938 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
939 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
940 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
941 add r22,r22,r30 ; Get end address (in PTEG units)
942
943 hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
944 xor r23,r23,r24 ; Hash it
945 and r23,r23,r26 ; Wrap it into the table
946 rlwinm r3,r23,28,4,29 ; Change to PCA offset
947 subfic r3,r3,-4 ; Get the PCA entry offset
948 add r7,r3,r29 ; Point to the PCA slot
949 cmplw cr5,r30,r22 ; Check if we reached the end of the range
950 addi r30,r30,64 ; bump to the next vaddr
951
952 bl mapLockPteg ; Lock the PTEG
953
954 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
955 add r5,r23,r29 ; Point to the PTEG
956 li r0,0 ; Set an invalid PTE value
957 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
958 mtcrf 0x80,r4 ; Set CRs to select PTE slots
959 mtcrf 0x40,r4 ; Set CRs to select PTE slots
960
961 bf 0,hrmSlot0 ; No autogen here
962 stw r0,0x00(r5) ; Invalidate PTE
963
964 hrmSlot0: bf 1,hrmSlot1 ; No autogen here
965 stw r0,0x08(r5) ; Invalidate PTE
966
967 hrmSlot1: bf 2,hrmSlot2 ; No autogen here
968 stw r0,0x10(r5) ; Invalidate PTE
969
970 hrmSlot2: bf 3,hrmSlot3 ; No autogen here
971 stw r0,0x18(r5) ; Invalidate PTE
972
973 hrmSlot3: bf 4,hrmSlot4 ; No autogen here
974 stw r0,0x20(r5) ; Invalidate PTE
975
976 hrmSlot4: bf 5,hrmSlot5 ; No autogen here
977 stw r0,0x28(r5) ; Invalidate PTE
978
979 hrmSlot5: bf 6,hrmSlot6 ; No autogen here
980 stw r0,0x30(r5) ; Invalidate PTE
981
982 hrmSlot6: bf 7,hrmSlot7 ; No autogen here
983 stw r0,0x38(r5) ; Invalidate PTE
984
985 hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
 986 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
987 andc r6,r6,r0 ; Turn off all the old autogen bits
988
989 hrmBNone32: eieio ; Make sure all updates come first
990
991 stw r6,0(r7) ; Unlock and set the PCA
992
993 bne+ cr5,hrmBInv32 ; Go invalidate the next...
994
995 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
996
997 mr r3,r31 ; Copy the pointer to the mapping
998 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
999
1000 sync ; Make sure memory is consistent
1001
1002 subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
1003 li r6,63 ; Assume full invalidate for now
1004 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1005 andc r6,r6,r5 ; Clear max if we have less to do
1006 and r5,r25,r5 ; Clear count if we have more than max
1007 lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
1008 li r7,tlbieLock ; Get the TLBIE lock
1009 or r5,r5,r6 ; Get number of TLBIEs needed
1010
1011 hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
1012 mr. r2,r2 ; Is it locked?
1013 li r2,1 ; Get our lock value
1014 bne- hrmBTLBlck ; It is locked, go wait...
1015 stwcx. r2,0,r7 ; Try to get it
1016 bne- hrmBTLBlck ; We was beat...
1017
1018 hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
1019 tlbie r27 ; Invalidate it everywhere
1020 addi r27,r27,0x1000 ; Up to the next page
1021 bge+ hrmBTLBi ; Make sure we have done it all...
1022
1023 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
1024 li r2,0 ; Lock clear value
1025
1026 sync ; Make sure all is quiet
1027 beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....
1028
1029 eieio ; Make sure that the tlbie happens first
1030 tlbsync ; Wait for everyone to catch up
1031 sync ; Wait for quiet again
1032
1033 hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock
1034
1035 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1036 bl sxlkShared ; Go get a shared lock on the mapping lists
1037 mr. r3,r3 ; Did we get the lock?
1038 bne- hrmPanic ; Nope...
1039
1040 lwz r4,mpVAddr(r31) ; High order of address
1041 lwz r5,mpVAddr+4(r31) ; Low order of address
1042 mr r3,r28 ; Pass in pmap to search
 1043 mr r29,r4 ; Save this in case we need it (only if promote fails)
 1044 mr r30,r5 ; Save this in case we need it (only if promote fails)
1045 bl EXT(mapSearchFull) ; Go see if we can find it
1046
1047 mr. r3,r3 ; Did we? (And remember mapping address for later)
1048 mr r15,r4 ; Save top of next vaddr
1049 mr r16,r5 ; Save bottom of next vaddr
1050 beq- hrmPanic ; Nope, not found...
1051
1052 cmplw r3,r31 ; Same mapping?
1053 bne- hrmPanic ; Not good...
1054
1055 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1056 bl sxlkPromote ; Try to promote shared to exclusive
1057 mr. r3,r3 ; Could we?
1058 mr r3,r31 ; Restore the mapping pointer
1059 beq+ hrmBDone1 ; Yeah...
1060
1061 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1062 bl sxlkConvert ; Convert shared to exclusive
1063 mr. r3,r3 ; Could we?
1064 bne-- hrmPanic ; Nope, we must have timed out...
1065
1066 mr r3,r28 ; Pass in pmap to search
1067 mr r4,r29 ; High order of address
1068 mr r5,r30 ; Low order of address
1069 bl EXT(mapSearchFull) ; Rescan the list
1070
1071 mr. r3,r3 ; Did we lose it when we converted?
1072 mr r15,r4 ; Save top of next vaddr
1073 mr r16,r5 ; Save bottom of next vaddr
1074 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1075
1076 hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused
1077
1078 mr r3,r28 ; Get the pmap to remove from
1079 mr r4,r31 ; Point to the mapping
1080 bl EXT(mapRemove) ; Remove the mapping from the list
1081
1082 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1083 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1084 subi r4,r4,1 ; Drop down the mapped page count
1085 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1086 bl sxlkUnlock ; Unlock the search list
1087
1088 b hrmRetn32 ; We are all done, get out...
1089
1090 ;
1091 ; Here we handle the 64-bit version of hw_rem_map
1092 ;
1093
1094 .align 5
1095
1096 hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
1097 beq-- cr5,hrmBlock64 ; Go treat block specially...
1098 subfic r9,r9,-4 ; Get the PCA entry offset
1099 bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
1100 add r7,r9,r29 ; Point to the PCA slot
1101
1102 bl mapLockPteg ; Go lock up the PTEG
1103
1104 lwz r21,mpPte(r31) ; Get the quick pointer again
1105 ld r5,0(r26) ; Get the top of PTE
1106
1107 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
1108 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
1109 sldi r23,r5,16 ; Shift AVPN up to EA format
1110 // **** Need to adjust above shift based on the page size - large pages need to shift a bit more
1111 rldicr r5,r5,0,62 ; Clear the valid bit
1112 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1113 stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
1114 beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...
1115
1116 std r5,0(r26) ; Invalidate the PTE
1117
1118 li r9,tlbieLock ; Get the TLBIE lock
1119
1120 sync ; Make sure the invalid PTE is actually in memory
1121
1122 hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
1123 rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
1124 mr. r5,r5 ; Is it locked?
1125 li r5,1 ; Get locked indicator
1126 bne-- hrmPtlb64w ; It is locked, go spin...
1127 stwcx. r5,0,r9 ; Try to get it
1128 bne-- hrmPtlb64 ; We was beat...
1129
1130 tlbie r23 ; Invalidate all corresponding TLB entries
1131
1132 eieio ; Make sure that the tlbie happens first
1133 tlbsync ; Wait for everyone to catch up
1134
1135 ptesync ; Make sure of it all
1136 li r0,0 ; Clear this
1137 rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
1138 stw r0,tlbieLock(0) ; Clear the tlbie lock
1139 oris r0,r0,0x8000 ; Assume slot 0
1140
1141 srw r0,r0,r2 ; Get slot mask to deallocate
1142
1143 lwz r22,12(r26) ; Get the latest reference and change bits
1144 or r6,r6,r0 ; Make the guy we killed free
1145
1146 hrmUlckPCA64:
1147 eieio ; Make sure all updates come first
1148
1149 stw r6,0(r7) ; Unlock and change the PCA
1150
1151 hrmPysDQ64: mr r3,r31 ; Point to the mapping
1152 bl mapDrainBusy ; Go wait until mapping is unused
1153
1154 mr r3,r28 ; Get the pmap to remove from
1155 mr r4,r31 ; Point to the mapping
1156 bl EXT(mapRemove) ; Remove the mapping from the list
1157
1158 rlwinm r0,r20,0,mpType ; Isolate mapping type
1159 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
1160 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1161 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1162 subi r4,r4,1 ; Drop down the mapped page count
1163 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1164 bl sxlkUnlock ; Unlock the search list
1165
1166 bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done...
1167
1168 bl mapPhysFindLock ; Go find and lock the physent
1169
1170 li r0,ppLFAmask ; Get mask to clean up mapping pointer
1171 ld r9,ppLink(r3) ; Get first mapping
 1172 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1173 mr r4,r22 ; Get the RC bits we just got
1174
1175 bl mapPhysMerge ; Go merge the RC bits
1176
1177 andc r9,r9,r0 ; Clean up the mapping pointer
1178
1179 cmpld r9,r31 ; Are we the first on the list?
1180 bne-- hrmNot1st64 ; Nope...
1181
1182 li r9,0 ; Get a 0
1183 ld r4,mpAlias(r31) ; Get our forward pointer
1184
1185 std r9,mpAlias(r31) ; Make sure we are off the chain
1186 bl mapPhyCSet64 ; Go set the physent link and preserve flags
1187
1188 b hrmPhyDQd64 ; Join up and unlock it all...
1189
1190 hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1191 stwcx. r5,0,r5 ; Clear the pending reservation
1192
1193
1194 hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1195 mr. r5,r5 ; is it locked?
1196 beq++ hrmPtlb64 ; Nope...
1197 b hrmPtlb64x ; Sniff some more...
1198
1199 .align 5
1200
1201 hrmNot1st64:
1202 mr. r8,r9 ; Remember and test current node
1203 beq-- hrmPhyDQd64 ; Could not find our node...
1204 ld r9,mpAlias(r9) ; Chain to the next
1205 cmpld r9,r31 ; Is this us?
1206 bne-- hrmNot1st64 ; Not us...
1207
1208 ld r9,mpAlias(r9) ; Get our forward pointer
1209 std r9,mpAlias(r8) ; Unchain us
1210
1211 nop ; For alignment
1212
1213 hrmPhyDQd64:
1214 bl mapPhysUnlock ; Unlock the physent chain
1215
1216 hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1217 mr r3,r31 ; Copy the pointer to the mapping
1218 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1219 bl mapDrainBusy ; Go wait until mapping is unused
1220
1221 xor r3,r31,r8 ; Flip mapping address to virtual
1222
1223 mtmsrd r17 ; Restore enables/translation/etc.
1224 isync
1225
1226 b hrmRetnCmn ; Join the common return path...
1227
1228
1229 ;
1230 ; Check hrmBlock32 for comments.
1231 ;
1232
1233 .align 5
1234
1235 hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1236 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
1237 lhz r24,mpSpace(r31) ; Get the address space hash
1238 lhz r25,mpBSize(r31) ; Get the number of pages in block
1239 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
1240 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1241 addi r25,r25,1 ; Account for zero-based counting
1242 ori r0,r20,mpRIP ; Turn on the remove in progress flag
1243 slw r25,r25,r29 ; Adjust for 32MB if needed
1244 mfsdr1 r29 ; Get the hash table base and size
1245 ld r27,mpVAddr(r31) ; Get the base vaddr
1246 subi r25,r25,1 ; Convert back to zero-based counting
1247 rlwinm r5,r29,0,27,31 ; Isolate the size
1248 sub r4,r25,r9 ; Get number of pages left
1249 cmplw cr1,r9,r25 ; Have we already hit the end?
1250 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1251 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1252 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1253 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1254 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1255 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1256 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1257 srdi r27,r27,12 ; Change address into page index
1258 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1259 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1260
1261 bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...
1262
1263 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1264 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1265 bl sxlkUnlock ; Unlock the search list while we are invalidating
1266
1267 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1268 eqv r26,r26,r26 ; Get all foxes here
1269 rldimi r24,r24,28,8 ; Make a couple copies up higher
1270 rldicr r29,r29,0,47 ; Isolate just the hash table base
1271 subfic r5,r5,46 ; Get number of leading zeros
1272 srd r26,r26,r5 ; Shift the size bits over
1273 mr r30,r27 ; Get start of chunk to invalidate
1274 rldicr r26,r26,0,56 ; Make length in PTEG units
1275 add r22,r4,r30 ; Get end page number
1276
1277 hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1278 rldicr r0,r0,0,49 ; Clean all but segment portion
1279 rlwinm r2,r30,0,16,31 ; Get the current page index
1280 xor r0,r0,r24 ; Form VSID
1281 xor r8,r2,r0 ; Hash the vaddr
1282 sldi r8,r8,7 ; Make into PTEG offset
1283 and r23,r8,r26 ; Wrap into the hash table
1284 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1285 subfic r3,r3,-4 ; Get the PCA entry offset
1286 add r7,r3,r29 ; Point to the PCA slot
1287
1288 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1289
1290 bl mapLockPteg ; Lock the PTEG
1291
1292 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1293 add r5,r23,r29 ; Point to the PTEG
1294 li r0,0 ; Set an invalid PTE value
1295 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1296 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1297 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1298
1299
1300 bf 0,hrmSlot0s ; No autogen here
1301 std r0,0x00(r5) ; Invalidate PTE
1302
1303 hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1304 std r0,0x10(r5) ; Invalidate PTE
1305
1306 hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1307 std r0,0x20(r5) ; Invalidate PTE
1308
1309 hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1310 std r0,0x30(r5) ; Invalidate PTE
1311
1312 hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1313 std r0,0x40(r5) ; Invalidate PTE
1314
1315 hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1316 std r0,0x50(r5) ; Invalidate PTE
1317
1318 hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1319 std r0,0x60(r5) ; Invalidate PTE
1320
1321 hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1322 std r0,0x70(r5) ; Invalidate PTE
1323
1324 hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
 1325 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
1326 andc r6,r6,r0 ; Turn off all the old autogen bits
1327
1328 hrmBNone64: eieio ; Make sure all updates come first
1329 stw r6,0(r7) ; Unlock and set the PCA
1330
1331 addi r30,r30,1 ; bump to the next PTEG
1332 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1333
1334 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1335
1336 mr r3,r31 ; Copy the pointer to the mapping
1337 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1338
1339 sync ; Make sure memory is consistent
1340
1341 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1342 li r6,255 ; Assume full invalidate for now
1343 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1344 andc r6,r6,r5 ; Clear max if we have less to do
1345 and r5,r25,r5 ; Clear count if we have more than max
1346 sldi r24,r24,28 ; Get the full XOR value over to segment position
1347 ld r27,mpVAddr(r31) ; Get the base vaddr
1348 li r7,tlbieLock ; Get the TLBIE lock
1349 or r5,r5,r6 ; Get number of TLBIEs needed
1350
1351 hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1352 mr. r2,r2 ; Is it locked?
1353 li r2,1 ; Get our lock value
1354 bne-- hrmBTLBlcm ; It is locked, go wait...
1355 stwcx. r2,0,r7 ; Try to get it
1356 bne-- hrmBTLBlcl ; We was beat...
1357
1358 hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1359 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1360 addic. r5,r5,-1 ; See if we did them all
1361 xor r2,r2,r24 ; Make the VSID
1362 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1363 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1364
1365 tlbie r2 ; Invalidate it everywhere
1366 addi r27,r27,0x1000 ; Up to the next page
1367 bge++ hrmBTLBj ; Make sure we have done it all...
1368
1369 eieio ; Make sure that the tlbie happens first
1370 tlbsync ; wait for everyone to catch up
1371
1372 li r2,0 ; Lock clear value
1373
1374 ptesync ; Wait for quiet again
1375
1376 stw r2,tlbieLock(0) ; Clear the tlbie lock
1377
1378 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1379 bl sxlkShared ; Go get a shared lock on the mapping lists
1380 mr. r3,r3 ; Did we get the lock?
1381 bne- hrmPanic ; Nope...
1382
1383 lwz r4,mpVAddr(r31) ; High order of address
1384 lwz r5,mpVAddr+4(r31) ; Low order of address
1385 mr r3,r28 ; Pass in pmap to search
 1386 mr r29,r4 ; Save this in case we need it (only if promote fails)
 1387 mr r30,r5 ; Save this in case we need it (only if promote fails)
1388 bl EXT(mapSearchFull) ; Go see if we can find it
1389
1390 mr. r3,r3 ; Did we? (And remember mapping address for later)
1391 mr r15,r4 ; Save top of next vaddr
1392 mr r16,r5 ; Save bottom of next vaddr
1393 beq- hrmPanic ; Nope, not found...
1394
1395 cmpld r3,r31 ; Same mapping?
1396 bne- hrmPanic ; Not good...
1397
1398 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1399 bl sxlkPromote ; Try to promote shared to exclusive
1400 mr. r3,r3 ; Could we?
1401 mr r3,r31 ; Restore the mapping pointer
1402 beq+ hrmBDone2 ; Yeah...
1403
1404 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1405 bl sxlkConvert ; Convert shared to exclusive
1406 mr. r3,r3 ; Could we?
1407 bne-- hrmPanic ; Nope, we must have timed out...
1408
1409 mr r3,r28 ; Pass in pmap to search
1410 mr r4,r29 ; High order of address
1411 mr r5,r30 ; Low order of address
1412 bl EXT(mapSearchFull) ; Rescan the list
1413
1414 mr. r3,r3 ; Did we lose it when we converted?
1415 mr r15,r4 ; Save top of next vaddr
1416 mr r16,r5 ; Save bottom of next vaddr
1417 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1418
1419 hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1420
1421 mr r3,r28 ; Get the pmap to remove from
1422 mr r4,r31 ; Point to the mapping
1423 bl EXT(mapRemove) ; Remove the mapping from the list
1424
1425 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1426 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1427 subi r4,r4,1 ; Drop down the mapped page count
1428 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1429 bl sxlkUnlock ; Unlock the search list
1430
1431 b hrmRetn64 ; We are all done, get out...
1432
1433 hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1434 stwcx. r2,0,r2 ; Unreserve it
1435
1436 hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1437 mr. r2,r2 ; Is it held?
1438 beq++ hrmBTLBlcl ; Nope...
1439 b hrmBTLBlcn ; Yeah...
1440
1441 ;
1442 ; Guest shadow assist -- mapping remove
1443 ;
1444 ; Method of operation:
1445 ; o Locate the VMM extension block and the host pmap
1446 ; o Obtain the host pmap's search lock exclusively
1447 ; o Locate the requested mapping in the shadow hash table,
1448 ; exit if not found
1449 ; o If connected, disconnect the PTE and gather R&C to physent
1450 ; o Locate and lock the physent
1451 ; o Remove mapping from physent's chain
1452 ; o Unlock physent
1453 ; o Unlock pmap's search lock
1454 ;
1455 ; Non-volatile registers on entry:
1456 ; r17: caller's msr image
1457 ; r19: sprg2 (feature flags)
1458 ; r28: guest pmap's physical address
1459 ; r29: high-order 32 bits of guest virtual address
1460 ; r30: low-order 32 bits of guest virtual address
1461 ;
1462 ; Non-volatile register usage:
1463 ; r26: VMM extension block's physical address
1464 ; r27: host pmap's physical address
1465 ; r28: guest pmap's physical address
1466 ; r29: physent's physical address
1467 ; r30: guest virtual address
1468 ; r31: guest mapping's physical address
1469 ;
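;
; Illustrative C sketch of the shadow hash search performed below (not part
; of the build; hash_group_for and slot_is_free are stand-ins, while the GV_*
; constants and mp* fields are the ones the code actually uses):
;
;     hash = space_id ^ (guest_vaddr >> 12);        // spaceID ^ (vaddr >> 12)
;     grp  = hash_group_for(hash);                  // GV_HPAGE_* picks the hash page,
;                                                   //   GV_HGRP_* the group within it
;     for (s = 0; s < GV_SLOTS; s++) {              // scan the group's mapping slots
;         if (!slot_is_free(&grp[s]) &&
;             grp[s].mpSpace == space_id &&
;             (grp[s].mpVAddr & ~0xFFFULL) == guest_vaddr)
;             return &grp[s];                       // hit: this is the mapping to remove
;     }
;     return NULL;                                  // miss: nothing to remove
;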
1470 .align 5
1471 hrmGuest:
1472 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1473 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1474 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1475 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1476 b hrmGStart ; Join common code
1477
1478 hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1479 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1480 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1481
1482 hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1483 bl sxlkExclusive ; Get lock exclusive
1484
1485 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1486
1487 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1488 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1489 srwi r11,r30,12 ; Form shadow hash:
1490 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1491 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1492 ; Form index offset from hash page number
1493 add r31,r31,r12 ; r31 <- hash page index entry
1494 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1495 mtctr r0 ; in this group
1496 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1497 lwz r31,4(r31) ; r31 <- hash page paddr
1498 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1499 ; r31 <- hash group paddr
1500
1501 addi r3,r3,1 ; Increment remove request count
1502 stw r3,vxsGrm(r26) ; Update remove request count
1503
1504 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1505 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1506 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1507 b hrmG32SrchLp ; Let the search begin!
1508
1509 .align 5
1510 hrmG32SrchLp:
1511 mr r6,r3 ; r6 <- current mapping slot's flags
1512 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1513 mr r7,r4 ; r7 <- current mapping slot's space ID
1514 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1515 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1516 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1517 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1518 xor r7,r7,r9 ; Compare space ID
1519 or r0,r11,r7 ; r0 <- !(free && space match)
1520 xor r8,r8,r30 ; Compare virtual address
1521 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1522 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1523
1524 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1525 bdnz hrmG32SrchLp ; Iterate
1526
1527 mr r6,r3 ; r6 <- current mapping slot's flags
1528 clrrwi r5,r5,12 ; Remove flags from virtual address
1529 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1530 xor r4,r4,r9 ; Compare space ID
1531 or r0,r11,r4 ; r0 <- !(free && space match)
1532 xor r5,r5,r30 ; Compare virtual address
1533 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1534 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1535 b hrmGSrchMiss ; No joy in our hash group
1536
1537 hrmG64Search:
1538 ld r31,0(r31) ; r31 <- hash page paddr
1539 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1540 ; r31 <- hash group paddr
1541 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1542 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1543 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1544 b hrmG64SrchLp ; Let the search begin!
1545
1546 .align 5
1547 hrmG64SrchLp:
1548 mr r6,r3 ; r6 <- current mapping slot's flags
1549 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1550 mr r7,r4 ; r7 <- current mapping slot's space ID
1551 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1552 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1553 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1554 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1555 xor r7,r7,r9 ; Compare space ID
1556 or r0,r11,r7 ; r0 <- !(free && space match)
1557 xor r8,r8,r30 ; Compare virtual address
1558 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1559 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1560
1561 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1562 bdnz hrmG64SrchLp ; Iterate
1563
1564 mr r6,r3 ; r6 <- current mapping slot's flags
1565 clrrdi r5,r5,12 ; Remove flags from virtual address
1566 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1567 xor r4,r4,r9 ; Compare space ID
1568 or r0,r11,r4 ; r0 <- !(free && space match)
1569 xor r5,r5,r30 ; Compare virtual address
1570 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1571 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1572 hrmGSrchMiss:
1573 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1574 li r25,mapRtNotFnd ; Return not found
1575 addi r3,r3,1 ; Increment miss count
1576 stw r3,vxsGrmMiss(r26) ; Update miss count
1577 b hrmGReturn ; Join guest return
1578
1579 .align 5
1580 hrmGSrchHit:
1581 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1582 bne hrmGDormant ; Yes, nothing to disconnect
1583
1584 lwz r3,vxsGrmActive(r26) ; Get active hit count
1585 addi r3,r3,1 ; Increment active hit count
1586 stw r3,vxsGrmActive(r26) ; Update hit count
1587
1588 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1589 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1590 ; r31 <- mapping's physical address
1591 ; r3 -> PTE slot physical address
1592 ; r4 -> High-order 32 bits of PTE
1593 ; r5 -> Low-order 32 bits of PTE
1594 ; r6 -> PCA
1595 ; r7 -> PCA physical address
1596 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1597 b hrmGFreePTE ; Join 64-bit path to release the PTE
1598 hrmGDscon64:
1599 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1600 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1601 hrmGFreePTE:
1602 mr. r3,r3 ; Was there a valid PTE?
1603 beq hrmGDormant ; No valid PTE, we're almost done
1604 lis r0,0x8000 ; Prepare free bit for this slot
1605 srw r0,r0,r2 ; Position free bit
1606 or r6,r6,r0 ; Set it in our PCA image
1607 lwz r8,mpPte(r31) ; Get PTE offset
1608 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1609 stw r8,mpPte(r31) ; Save invalidated PTE offset
1610 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1611 stw r6,0(r7) ; Update PCA and unlock the PTEG
1612
1613 hrmGDormant:
1614 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1615 bl mapFindLockPN ; Find 'n' lock this page's physent
1616 mr. r29,r3 ; Got lock on our physent?
1617 beq-- hrmGBadPLock ; No, time to bail out
1618
1619 crset cr1_eq ; cr1_eq <- previous link is the anchor
1620 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1621 la r11,ppLink+4(r29) ; Point to chain anchor
1622 lwz r9,ppLink+4(r29) ; Get chain anchor
1623 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1624 hrmGRemLoop:
1625 beq- hrmGPEMissMiss ; End of chain, this is not good
1626 cmplw r9,r31 ; Is this the mapping to remove?
1627 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1628 bne hrmGRemNext ; No, chain onward
1629 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1630 stw r8,0(r11) ; Unchain gpv->phys mapping
1631 b hrmGDelete ; Finish deleting mapping
1632 hrmGRemRetry:
1633 lwarx r0,0,r11 ; Get previous link
1634 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1635 stwcx. r0,0,r11 ; Update previous link
1636 bne- hrmGRemRetry ; Lost reservation, retry
1637 b hrmGDelete ; Finish deleting mapping
1638
1639 hrmGRemNext:
1640 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1641 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1642 mr. r9,r8 ; Does next entry exist?
1643 b hrmGRemLoop ; Carry on
1644
1645 hrmGRemove64:
1646 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1647 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1648 la r11,ppLink(r29) ; Point to chain anchor
1649 ld r9,ppLink(r29) ; Get chain anchor
1650 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1651 hrmGRem64Lp:
1652 beq-- hrmGPEMissMiss ; End of chain, this is not good
1653 cmpld r9,r31 ; Is this the mapping to remove?
1654 ld r8,mpAlias(r9) ; Get forward chain pointer
1655 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1656 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1657 std r8,0(r11) ; Unchain gpv->phys mapping
1658 b hrmGDelete ; Finish deleting mapping
1659 hrmGRem64Rt:
1660 ldarx r0,0,r11 ; Get previous link
1661 and r0,r0,r7 ; Get flags
1662 or r0,r0,r8 ; Insert new forward pointer
1663 stdcx. r0,0,r11 ; Slam it back in
1664 bne-- hrmGRem64Rt ; Lost reservation, retry
1665 b hrmGDelete ; Finish deleting mapping
1666
1667 .align 5
1668 hrmGRem64Nxt:
1669 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1670 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1671 mr. r9,r8 ; Does next entry exist?
1672 b hrmGRem64Lp ; Carry on
1673
1674 hrmGDelete:
1675 mr r3,r29 ; r3 <- physent addr
1676 bl mapPhysUnlock ; Unlock physent chain
1677 lwz r3,mpFlags(r31) ; Get mapping's flags
1678 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1679 ori r3,r3,mpgFree ; Mark mapping free
1680 stw r3,mpFlags(r31) ; Update flags
1681 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1682
1683 hrmGReturn:
1684 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1685 bl sxlkUnlock ; Release host pmap search lock
1686
1687 mr r3,r25 ; r3 <- return code
1688 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1689 mtmsr r17 ; Restore 'rupts, translation
1690 isync ; Throw a small wrench into the pipeline
1691 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1692 hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1693 b hrmRetnCmn ; Join common return
1694
1695 hrmGBadPLock:
1696 hrmGPEMissMiss:
1697 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1698 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1699 li r3,failMapping ; All the way from New Orleans
1700 sc ; To Jerusalem
1701
1702
1703 /*
1704 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1705 *
1706 * Upon entry, R3 contains a pointer to a physent.
1707 *
1708 * This function removes the first mapping from a physical entry
1709 * alias list. It locks the list, extracts the vaddr and pmap from
1710 * the first entry. It then jumps into the hw_rem_map function.
1711 * NOTE: since we jump into rem_map, we need to set up the stack
1712 * identically. Also, we set the next parm to 0 so we do not
1713 * try to save a next vaddr.
1714 *
1715 * We return the virtual address of the removed mapping in
1716 * R3.
1717 *
1718 * Note that this is designed to be called from 32-bit mode with a stack.
1719 *
1720 * We disable translation and all interruptions here. This keeps us
1721 * from having to worry about a deadlock due to having anything locked
1722 * and needing it to process a fault.
1723 *
1724 * Note that this must be done with both interruptions off and VM off
1725 *
1726 *
1727 * Remove mapping via physical page (mapping_purge)
1728 *
1729 * 1) lock physent
1730 * 2) extract vaddr and pmap
1731 * 3) unlock physent
1732 * 4) do "remove mapping via pmap"
1733 *
1734 *
1735 */
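/*
 *	A rough, illustrative C rendering of the sequence above. The types and
 *	helper names (physent_t, alias_chain_head, pmap_for_space, remove_mapping)
 *	are hypothetical stand-ins; the real work is done by the assembly below,
 *	which tail-joins the common hrmJoin path:
 *
 *		addr64_t hw_purge_phys_sketch(physent_t *pp) {
 *			map_phys_lock(pp);                         // 1) lock physent
 *			mapping_t *mp = alias_chain_head(pp);      // ppLink with flag bits masked off
 *			if (mp == NULL) {
 *				map_phys_unlock(pp);
 *				return mapRtEmpty;                     // nothing left on this page
 *			}
 *			pmap_t  *pm = pmap_for_space(mp->mpSpace); // 2) pmapTrans[space] lookup
 *			addr64_t va = mp->mpVAddr;
 *			map_phys_unlock(pp);                       // 3) unlock physent
 *			return remove_mapping(pm, va, NULL);       // 4) remove via pmap (hrmJoin)
 *		}
 */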
1736
1737 .align 5
1738 .globl EXT(hw_purge_phys)
1739
1740 LEXT(hw_purge_phys)
1741 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1742 mflr r0 ; Save the link register
1743 stw r15,FM_ARG0+0x00(r1) ; Save a register
1744 stw r16,FM_ARG0+0x04(r1) ; Save a register
1745 stw r17,FM_ARG0+0x08(r1) ; Save a register
1746 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1747 stw r19,FM_ARG0+0x10(r1) ; Save a register
1748 stw r20,FM_ARG0+0x14(r1) ; Save a register
1749 stw r21,FM_ARG0+0x18(r1) ; Save a register
1750 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1751 stw r23,FM_ARG0+0x20(r1) ; Save a register
1752 stw r24,FM_ARG0+0x24(r1) ; Save a register
1753 stw r25,FM_ARG0+0x28(r1) ; Save a register
1754 li r6,0 ; Set no next address return
1755 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1756 stw r27,FM_ARG0+0x30(r1) ; Save a register
1757 stw r28,FM_ARG0+0x34(r1) ; Save a register
1758 stw r29,FM_ARG0+0x38(r1) ; Save a register
1759 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1760 stw r31,FM_ARG0+0x40(r1) ; Save a register
1761 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1762 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1763
1764 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1765
1766 bl mapPhysLock ; Lock the physent
1767
1768 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1769
1770 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1771 li r0,ppFlags ; Set the bottom stuff to clear
1772 b hppJoin ; Join the common...
1773
1774 hppSF: li r0,ppLFAmask
1775 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1776 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1777
1778 hppJoin: andc. r12,r12,r0 ; Clean and test link
1779 beq-- hppNone ; There are no more mappings on physical page
1780
1781 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1782 lhz r7,mpSpace(r12) ; Get the address space hash
1783 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1784 slwi r0,r7,2 ; Multiply space by 4
1785 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1786 slwi r7,r7,3 ; Multiply space by 8
1787 lwz r5,mpVAddr+4(r12) ; and the bottom
1788 add r7,r7,r0 ; Get correct displacement into translate table
1789 lwz r28,0(r28) ; Get the actual translation map
1790
1791 add r28,r28,r7 ; Point to the pmap translation
1792
1793 bl mapPhysUnlock ; Time to unlock the physical entry
1794
1795 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1796
1797 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1798 b hrmJoin ; Go remove the mapping...
1799
1800 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1801 b hrmJoin ; Go remove the mapping...
1802
1803 .align 5
1804
1805 hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1806
1807 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1808
1809 mtmsr r11 ; Restore enables/translation/etc.
1810 isync
1811 b hppRetnCmn ; Join the common return code...
1812
1813 hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1814 isync
1815
1816 ;
1817 ; NOTE: we have not used any registers other than the volatiles to this point
1818 ;
1819
1820 hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1821
1822 li r3,mapRtEmpty ; Physent chain is empty
1823 mtlr r12 ; Restore the return
1824 lwz r1,0(r1) ; Pop the stack
1825 blr ; Leave...
1826
1827 /*
1828 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1829 *
1830 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1831 * a 64-bit quantity, it is passed as a long long in R4 and R5.
1832 *
1833 * We return the virtual address of the removed mapping in
1834 * R3.
1835 *
1836 * Note that this is designed to be called from 32-bit mode with a stack.
1837 *
1838 * We disable translation and all interruptions here. This keeps us
1839 * from having to worry about a deadlock due to having anything locked
1840 * and needing it to process a fault.
1841 *
1842 * Note that this must be done with both interruptions off and VM off
1843 *
1844 * Remove a mapping which can be reestablished by VM
1845 *
1846 */
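/*
 *	A rough, illustrative C rendering of the scan described above; the types
 *	and helpers (pmap_t, map_search_full, is_normal, busy_count, ...) are
 *	hypothetical stand-ins. The skip conditions mirror hpmGotOne below:
 *
 *		addr64_t hw_purge_map_sketch(pmap_t *pm, addr64_t va, addr64_t *next) {
 *			sxlk_exclusive(&pm->search_lock);               // lock the mapping lists
 *			do {
 *				mapping_t *mp = map_search_full(pm, va, &va);   // va becomes "next"
 *				*next = va;
 *				if (mp != NULL && is_normal(mp) && !is_permanent(mp)
 *				    && busy_count(mp) == 0)
 *					return remove_mapping(pm, mp);  // joins common removal (hrmGotX),
 *			} while (va != 0);                          //   which drops the search lock
 *			sxlk_unlock(&pm->search_lock);
 *			return mapRtNotFnd;                         // nothing removable in this pmap
 *		}
 */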
1847
1848 .align 5
1849 .globl EXT(hw_purge_map)
1850
1851 LEXT(hw_purge_map)
1852 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1853 mflr r0 ; Save the link register
1854 stw r15,FM_ARG0+0x00(r1) ; Save a register
1855 stw r16,FM_ARG0+0x04(r1) ; Save a register
1856 stw r17,FM_ARG0+0x08(r1) ; Save a register
1857 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1858 stw r19,FM_ARG0+0x10(r1) ; Save a register
1859 mfsprg r19,2 ; Get feature flags
1860 stw r20,FM_ARG0+0x14(r1) ; Save a register
1861 stw r21,FM_ARG0+0x18(r1) ; Save a register
1862 mtcrf 0x02,r19 ; move pf64Bit cr6
1863 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1864 stw r23,FM_ARG0+0x20(r1) ; Save a register
1865 stw r24,FM_ARG0+0x24(r1) ; Save a register
1866 stw r25,FM_ARG0+0x28(r1) ; Save a register
1867 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1868 stw r27,FM_ARG0+0x30(r1) ; Save a register
1869 stw r28,FM_ARG0+0x34(r1) ; Save a register
1870 stw r29,FM_ARG0+0x38(r1) ; Save a register
1871 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1872 stw r31,FM_ARG0+0x40(r1) ; Save a register
1873 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1874 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1875
1876 #if DEBUG
1877 lwz r11,pmapFlags(r3) ; Get pmaps flags
1878 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1879 bne hpmPanic ; Call not valid for guest shadow assist pmap
1880 #endif
1881
1882 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1883 lwz r9,pmapvr+4(r3) ; Get conversion mask
1884 b hpmSF1x ; Done...
1885
1886 hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1887
1888 hpmSF1x:
1889 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1890
1891 xor r28,r3,r9 ; Convert the pmap to physical addressing
1892
1893 mr r17,r11 ; Save the MSR
1894
1895 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1896 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1897 mr. r3,r3 ; Did we get the lock?
1898 bne-- hrmBadLock ; Nope...
1899 ;
1900 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
1901 ; here so that we know the previous elements and can dequeue them
1902 ; later.
1903 ;
1904 hpmSearch:
1905 mr r3,r28 ; Pass in pmap to search
1906 mr r29,r4 ; Top half of vaddr
1907 mr r30,r5 ; Bottom half of vaddr
1908 bl EXT(mapSearchFull) ; Rescan the list
1909 mr. r31,r3 ; Did we? (And remember mapping address for later)
1910 or r0,r4,r5 ; Are we beyond the end?
1911 mr r15,r4 ; Save top of next vaddr
1912 cmplwi cr1,r0,0 ; See if there is another
1913 mr r16,r5 ; Save bottom of next vaddr
1914 bne-- hpmGotOne ; We found one, go check it out...
1915
1916 hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1917 b hrmNotFound ; No more in pmap to check...
1918
1919 hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1920 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
1921 rlwinm r21,r20,8,24,31 ; Extract the busy count
1922 cmplwi cr2,r21,0 ; Is it busy?
1923 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
1924 beq++ hrmGotX ; Found, branch to remove the mapping...
1925 b hpmCNext ; Nope...
1926
1927 hpmPanic: lis r0,hi16(Choke) ; System abend
1928 ori r0,r0,lo16(Choke) ; System abend
1929 li r3,failMapping ; Show that we failed some kind of mapping thing
1930 sc
1931
1932 /*
1933 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1934 *
1935 * Upon entry, R3 contains a pointer to the physent and
1936 * R4 contains a pointer to the pmap.
1937 *
1938 * This function removes the first mapping for a specific pmap from a physical entry
1939 * alias list. It locks the list, extracts the vaddr and pmap from
1940 * the first appropriate entry. It then jumps into the hw_rem_map function.
1941 * NOTE: since we jump into rem_map, we need to set up the stack
1942 * identically. Also, we set the next parm to 0 so we do not
1943 * try to save a next vaddr.
1944 *
1945 * We return the virtual address of the removed mapping in
1946 * R3.
1947 *
1948 * Note that this is designed to be called from 32-bit mode with a stack.
1949 *
1950 * We disable translation and all interruptions here. This keeps us
1951 * from having to worry about a deadlock due to having anything locked
1952 * and needing it to process a fault.
1953 *
1954 * Note that this must be done with both interruptions off and VM off
1955 *
1956 *
1957 * Remove mapping via physical page (mapping_purge)
1958 *
1959 * 1) lock physent
1960 * 2) extract vaddr and pmap
1961 * 3) unlock physent
1962 * 4) do "remove mapping via pmap"
1963 *
1964 *
1965 */
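/*
 *	A rough, illustrative C rendering of the scan above; hypothetical stand-in
 *	types and helpers only, mirroring the hpsSrc32/hpsSrc64 loops below:
 *
 *		addr64_t hw_purge_space_sketch(physent_t *pp, pmap_t *pm) {
 *			map_phys_lock(pp);
 *			unsigned space = pm->pmapSpace;                // space hash of the caller
 *			for (mapping_t *mp = alias_chain_head(pp); mp != NULL; mp = mp->mpAlias) {
 *				if (mp->mpSpace != space)
 *					continue;                          // not one of ours, keep walking
 *				addr64_t va = mp->mpVAddr;
 *				map_phys_unlock(pp);
 *				return remove_mapping(pm, va, NULL);   // joins the common hrmJoin path
 *			}
 *			map_phys_unlock(pp);
 *			return mapRtEmpty;                             // no mapping for this pmap here
 *		}
 */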
1966
1967 .align 5
1968 .globl EXT(hw_purge_space)
1969
1970 LEXT(hw_purge_space)
1971 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1972 mflr r0 ; Save the link register
1973 stw r15,FM_ARG0+0x00(r1) ; Save a register
1974 stw r16,FM_ARG0+0x04(r1) ; Save a register
1975 stw r17,FM_ARG0+0x08(r1) ; Save a register
1976 mfsprg r2,2 ; Get feature flags
1977 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1978 stw r19,FM_ARG0+0x10(r1) ; Save a register
1979 stw r20,FM_ARG0+0x14(r1) ; Save a register
1980 stw r21,FM_ARG0+0x18(r1) ; Save a register
1981 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1982 mtcrf 0x02,r2 ; move pf64Bit cr6
1983 stw r23,FM_ARG0+0x20(r1) ; Save a register
1984 stw r24,FM_ARG0+0x24(r1) ; Save a register
1985 stw r25,FM_ARG0+0x28(r1) ; Save a register
1986 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1987 stw r27,FM_ARG0+0x30(r1) ; Save a register
1988 li r6,0 ; Set no next address return
1989 stw r28,FM_ARG0+0x34(r1) ; Save a register
1990 stw r29,FM_ARG0+0x38(r1) ; Save a register
1991 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1992 stw r31,FM_ARG0+0x40(r1) ; Save a register
1993 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1994 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1995
1996 #if DEBUG
1997 lwz r11,pmapFlags(r4) ; Get pmaps flags
1998 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1999 bne hpsPanic ; Call not valid for guest shadow assist pmap
2000 #endif
2001
2002 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
2003
2004 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
2005
2006 b hpsSF1x ; Done...
2007
2008 hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2009
2010 hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2011
2012 xor r4,r4,r9 ; Convert the pmap to physical addressing
2013
2014 bl mapPhysLock ; Lock the physent
2015
2016 lwz r8,pmapSpace(r4) ; Get the space hash
2017
2018 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2019
2020 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2021
2022 hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2023 beq hpsNone ; Did not find one...
2024
2025 lhz r10,mpSpace(r12) ; Get the space
2026
2027 cmplw r10,r8 ; Is this one of ours?
2028 beq hpsFnd ; Yes...
2029
2030 lwz r12,mpAlias+4(r12) ; Chain on to the next
2031 b hpsSrc32 ; Check it out...
2032
2033 .align 5
2034
2035 hpsSF: li r0,ppLFAmask
2036 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2037 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2038
2039 hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2040 beq hpsNone ; Did not find one...
2041
2042 lhz r10,mpSpace(r12) ; Get the space
2043
2044 cmplw r10,r8 ; Is this one of ours?
2045 beq hpsFnd ; Yes...
2046
2047 ld r12,mpAlias(r12) ; Chain on to the next
2048 b hpsSrc64 ; Check it out...
2049
2050 .align 5
2051
2052 hpsFnd: mr r28,r4 ; Set the pmap physical address
2053 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2054 lwz r5,mpVAddr+4(r12) ; and the bottom
2055
2056 bl mapPhysUnlock ; Time to unlock the physical entry
2057 b hrmJoin ; Go remove the mapping...
2058
2059 .align 5
2060
2061 hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2062
2063 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
2064
2065 mtmsr r11 ; Restore enables/translation/etc.
2066 isync
2067 b hpsRetnCmn ; Join the common return code...
2068
2069 hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2070 isync
2071
2072 ;
2073 ; NOTE: we have not used any registers other than the volatiles to this point
2074 ;
2075
2076 hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2077
2078 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
2079 mtlr r12 ; Restore the return
2080 lwz r1,0(r1) ; Pop the stack
2081 blr ; Leave...
2082
2083 hpsPanic: lis r0,hi16(Choke) ; System abend
2084 ori r0,r0,lo16(Choke) ; System abend
2085 li r3,failMapping ; Show that we failed some kind of mapping thing
2086 sc
2087
2088 /*
2089 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2090 * on this physent chain
2091 *
2092 * Locates the first guest mapping on the physent chain that is associated with the
2093 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2094 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2095 * repeatedly until no additional guest mappings that match our criteria are removed.
2096 *
2097 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2098 *
2099 * Parameters:
2100 * r3 : physent, 32-bit kernel virtual address
2101 * r4 : host pmap, 32-bit kernel virtual address
2102 *
2103 * Volatile register usage (for linkage through hrmJoin):
2104 * r4 : high-order 32 bits of guest virtual address
2105 * r5 : low-order 32 bits of guest virtual address
2106 * r11: saved MSR image
2107 *
2108 * Non-volatile register usage:
2109 * r26: VMM extension block's physical address
2110 * r27: host pmap's physical address
2111 * r28: guest pmap's physical address
2112 *
2113 */
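/*
 *	A rough, illustrative C rendering of the scan above; hypothetical stand-in
 *	types and helper names, mirroring the hsg32Loop/hsg64Loop code below:
 *
 *		addr64_t hw_scrub_guest_sketch(physent_t *pp, pmap_t *host) {
 *			vmm_ext_t *hvmx = host->pmapVmmExtPhys;          // host's VMM extension block
 *			map_phys_lock(pp);
 *			for (mapping_t *mp = alias_chain_head(pp); mp != NULL; mp = mp->mpAlias) {
 *				pmap_t *guest = pmap_for_space(mp->mpSpace);   // pmapTrans lookup
 *				if (mp->type != mpGuest || guest->pmapVmmExtPhys != hvmx)
 *					continue;                            // not a guest of this host
 *				addr64_t va = mp->mpVAddr;
 *				map_phys_unlock(pp);
 *				return remove_mapping(guest, va, NULL);  // joins the common hrmJoin path
 *			}
 *			map_phys_unlock(pp);
 *			return mapRtEmpty;                               // no matching guest mapping
 *		}
 */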
2114
2115 .align 5
2116 .globl EXT(hw_scrub_guest)
2117
2118 LEXT(hw_scrub_guest)
2119 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2120 mflr r0 ; Save the link register
2121 stw r15,FM_ARG0+0x00(r1) ; Save a register
2122 stw r16,FM_ARG0+0x04(r1) ; Save a register
2123 stw r17,FM_ARG0+0x08(r1) ; Save a register
2124 mfsprg r2,2 ; Get feature flags
2125 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2126 stw r19,FM_ARG0+0x10(r1) ; Save a register
2127 stw r20,FM_ARG0+0x14(r1) ; Save a register
2128 stw r21,FM_ARG0+0x18(r1) ; Save a register
2129 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2130 mtcrf 0x02,r2 ; move pf64Bit cr6
2131 stw r23,FM_ARG0+0x20(r1) ; Save a register
2132 stw r24,FM_ARG0+0x24(r1) ; Save a register
2133 stw r25,FM_ARG0+0x28(r1) ; Save a register
2134 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2135 stw r27,FM_ARG0+0x30(r1) ; Save a register
2136 li r6,0 ; Set no next address return
2137 stw r28,FM_ARG0+0x34(r1) ; Save a register
2138 stw r29,FM_ARG0+0x38(r1) ; Save a register
2139 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2140 stw r31,FM_ARG0+0x40(r1) ; Save a register
2141 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2142 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2143
2144 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2145
2146 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2147 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2148 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2149 b hsgStart ; Get to work
2150
2151 hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2152 ld r9,pmapvr+4(r4) ; Get 64-bit virt<->real conversion salt
2153
2154 hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2155 xor r27,r4,r9 ; Convert host pmap_t virt->real
2156 bl mapPhysLock ; Lock the physent
2157
2158 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2159
2160 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2161 hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2162 beq hsg32Miss ; Did not find one...
2163 lwz r8,mpFlags(r12) ; Get mapping's flags
2164 lhz r7,mpSpace(r12) ; Get mapping's space id
2165 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2166 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2167 xori r8,r8,mpGuest ; Is it a guest mapping?
2168 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2169 slwi r9,r7,2 ; Multiply space by 4
2170 lwz r28,0(r28) ; Get the actual translation map
2171 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2172 slwi r7,r7,3 ; Multiply space by 8
2173 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2174 add r7,r7,r9 ; Get correct displacement into translate table
2175 add r28,r28,r7 ; Point to the pmap translation
2176 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2177 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2178 xor r7,r7,r26 ; Is guest associated with specified host?
2179 or. r7,r7,r8 ; Guest mapping && associated with host?
2180 lwz r12,mpAlias+4(r12) ; Chain on to the next
2181 bne hsg32Loop ; Try next mapping on alias chain
2182
2183 hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2184 b hrmJoin ; Join common path for mapping removal
2185
2186 .align 5
2187 hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2188 mtmsr r11 ; Restore 'rupts, translation
2189 isync ; Throw a small wrench into the pipeline
2190 li r3,mapRtEmpty ; No mappings found matching specified criteria
2191 b hrmRetnCmn ; Exit through common epilog
2192
2193 .align 5
2194 hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2195 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2196 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2197 hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2198 beq hsg64Miss ; Did not find one...
2199 lwz r8,mpFlags(r12) ; Get mapping's flags
2200 lhz r7,mpSpace(r12) ; Get mapping's space id
2201 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2202 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2203 xori r8,r8,mpGuest ; Is it a guest mapping?
2204 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2205 slwi r9,r7,2 ; Multiply space by 4
2206 lwz r28,0(r28) ; Get the actual translation map
2207 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2208 slwi r7,r7,3 ; Multiply space by 8
2209 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2210 add r7,r7,r9 ; Get correct displacement into translate table
2211 add r28,r28,r7 ; Point to the pmap translation
2212 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2213 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2214 xor r7,r7,r26 ; Is guest associated with specified host?
2215 or. r7,r7,r8 ; Guest mapping && associated with host?
2216 ld r12,mpAlias(r12) ; Chain on to the next
2217 bne hsg64Loop ; Try next mapping on alias chain
2218
2219 hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2220 b hrmJoin ; Join common path for mapping removal
2221
2222 .align 5
2223 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
2224 mtmsrd r11 ; Restore 'rupts, translation
2225 li r3,mapRtEmpty ; No mappings found matching specified criteria
2226 b hrmRetnCmn ; Exit through common epilog
2227
2228
2229 /*
2230 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2231 *
2232 * Upon entry, R3 contains a pointer to a physent.
2233 * space is the space ID from the pmap in question
2234 *
2235 * We return the virtual address of the found mapping in
2236 * R3. Note that the mapping's busy count is bumped.
2237 *
2238 * Note that this is designed to be called from 32-bit mode with a stack.
2239 *
2240 * We disable translation and all interruptions here. This keeps us
2241 * from having to worry about a deadlock due to having anything locked
2242 * and needing it to process a fault.
2243 *
2244 */
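/*
 *	A rough, illustrative C rendering of the lookup above; hypothetical
 *	stand-in types and helpers, mirroring the hfsSrc32/hfsSrc64 loops below:
 *
 *		mapping_t *hw_find_space_sketch(physent_t *pp, unsigned space) {
 *			map_phys_lock(pp);
 *			for (mapping_t *mp = alias_chain_head(pp); mp != NULL; mp = mp->mpAlias) {
 *				if (mp->mpSpace != space)
 *					continue;                        // keep walking the alias chain
 *				map_bump_busy(mp);                   // so the mapping cannot vanish
 *				map_phys_unlock(pp);
 *				return map_phys_to_virt(mp);         // mbvrswap conversion
 *			}
 *			map_phys_unlock(pp);
 *			return NULL;                                 // no mapping for this space
 *		}
 */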
2245
2246 .align 5
2247 .globl EXT(hw_find_space)
2248
2249 LEXT(hw_find_space)
2250 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2251 mflr r0 ; Save the link register
2252 mr r8,r4 ; Remember the space
2253 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2254
2255 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2256
2257 bl mapPhysLock ; Lock the physent
2258
2259 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2260
2261 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2262
2263 hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2264 beq hfsNone ; Did not find one...
2265
2266 lhz r10,mpSpace(r12) ; Get the space
2267
2268 cmplw r10,r8 ; Is this one of ours?
2269 beq hfsFnd ; Yes...
2270
2271 lwz r12,mpAlias+4(r12) ; Chain on to the next
2272 b hfsSrc32 ; Check it out...
2273
2274 .align 5
2275
2276 hfsSF: li r0,ppLFAmask
2277 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2278 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2279
2280 hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2281 beq hfsNone ; Did not find one...
2282
2283 lhz r10,mpSpace(r12) ; Get the space
2284
2285 cmplw r10,r8 ; Is this one of ours?
2286 beq hfsFnd ; Yes...
2287
2288 ld r12,mpAlias(r12) ; Chain on to the next
2289 b hfsSrc64 ; Check it out...
2290
2291 .align 5
2292
2293 hfsFnd: mr r8,r3 ; Save the physent
2294 mr r3,r12 ; Point to the mapping
2295 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2296
2297 mr r3,r8 ; Get back the physical entry
2298 li r7,0xFFF ; Get a page size mask
2299 bl mapPhysUnlock ; Time to unlock the physical entry
2300
2301 andc r3,r12,r7 ; Move the mapping back down to a page
2302 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2303 xor r12,r3,r12 ; Convert to virtual
2304 b hfsRet ; Time to return
2305
2306 .align 5
2307
2308 hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2309
2310 hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
2311
2312 mtmsr r11 ; Restore enables/translation/etc.
2313 isync
2314 b hfsRetnCmn ; Join the common return code...
2315
2316 hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2317 isync
2318
2319 ;
2320 ; NOTE: we have not used any registers other than the volatiles to this point
2321 ;
2322
2323 hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
2324
2325 #if DEBUG
2326 mr. r3,r3 ; Anything to return?
2327 beq hfsRetnNull ; Nope
2328 lwz r11,mpFlags(r3) ; Get mapping flags
2329 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2330 cmplwi r0,mpGuest ; Shadow guest mapping?
2331 beq hfsPanic ; Yup, kick the bucket
2332 hfsRetnNull:
2333 #endif
2334
2335 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2336
2337 mtlr r12 ; Restore the return
2338 lwz r1,0(r1) ; Pop the stack
2339 blr ; Leave...
2340
2341 hfsPanic: lis r0,hi16(Choke) ; System abend
2342 ori r0,r0,lo16(Choke) ; System abend
2343 li r3,failMapping ; Show that we failed some kind of mapping thing
2344 sc
2345
2346 ;
2347 ; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2348 ; Returns 0 if not found or the virtual address of the mapping if
2349 ; it is. Also, the mapping's busy count is bumped.
2350 ;
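;
;	A rough, illustrative C rendering of the lookup below; the types and helper
;	names are hypothetical stand-ins, not real kernel interfaces:
;
;		mapping_t *hw_find_map_sketch(pmap_t *pm, addr64_t va, addr64_t *nextva) {
;			sxlk_shared(&pm->search_lock);               // shared search-list lock
;			mapping_t *mp = map_search(pm, va, nextva);  // also yields the next vaddr
;			if (mp != NULL && !remove_in_progress(mp)) {
;				map_bump_busy(mp);                       // keep it from disappearing
;				mp = map_phys_to_virt(mp);               // mbvrswap conversion
;			} else
;				mp = NULL;                               // not found, or being removed
;			sxlk_unlock(&pm->search_lock);
;			return mp;
;		}
;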
2351 .align 5
2352 .globl EXT(hw_find_map)
2353
2354 LEXT(hw_find_map)
2355 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2356 mflr r0 ; Save the link register
2357 stw r25,FM_ARG0+0x00(r1) ; Save a register
2358 stw r26,FM_ARG0+0x04(r1) ; Save a register
2359 mr r25,r6 ; Remember address of next va
2360 stw r27,FM_ARG0+0x08(r1) ; Save a register
2361 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2362 stw r29,FM_ARG0+0x10(r1) ; Save a register
2363 stw r30,FM_ARG0+0x14(r1) ; Save a register
2364 stw r31,FM_ARG0+0x18(r1) ; Save a register
2365 stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2366
2367 #if DEBUG
2368 lwz r11,pmapFlags(r3) ; Get pmaps flags
2369 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2370 bne hfmPanic ; Call not valid for guest shadow assist pmap
2371 #endif
2372
2373 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2374 lwz r7,pmapvr+4(r3) ; Get the second part
2375
2376
2377 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2378
2379 mr r27,r11 ; Remember the old MSR
2380 mr r26,r12 ; Remember the feature bits
2381
2382 xor r28,r3,r7 ; Change the common 32- and 64-bit half
2383
2384 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
2385
2386 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2387
2388 hfmSF1: mr r29,r4 ; Save top half of vaddr
2389 mr r30,r5 ; Save the bottom half
2390
2391 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2392 bl sxlkShared ; Go get a shared lock on the mapping lists
2393 mr. r3,r3 ; Did we get the lock?
2394 bne-- hfmBadLock ; Nope...
2395
2396 mr r3,r28 ; get the pmap address
2397 mr r4,r29 ; Get bits 0:31 to look for
2398 mr r5,r30 ; Get bits 32:64
2399
2400 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
2401
2402 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2403 mr. r31,r3 ; Save the mapping if we found it
2404 cmplwi cr1,r0,0 ; Are we removing?
2405 mr r29,r4 ; Save next va high half
2406 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2407 mr r30,r5 ; Save next va low half
2408 li r6,0 ; Assume we did not find it
2409 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2410
2411 bt-- cr0_eq,hfmNotFnd ; We did not find it...
2412
2413 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2414
2415 andc r4,r31,r26 ; Get back to the mapping page start
2416
2417 ; Note: we can treat 32- and 64-bit the same here. Because we are going from
2418 ; physical to virtual and we only do 32-bit virtual, we only need the low order
2419 ; word of the xor.
2420
2421 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2422 li r6,-1 ; Indicate we found it and it is not being removed
2423 xor r31,r31,r4 ; Flip to virtual
2424
2425 hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2426 bl sxlkUnlock ; Unlock the search list
2427
2428 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2429 and r3,r3,r6 ; Clear if not found or removing
2430
2431 hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
2432
2433 mtmsr r27 ; Restore enables/translation/etc.
2434 isync
2435 b hfmReturnC ; Join common...
2436
2437 hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2438 isync
2439
2440 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2441 stw r30,4(r25) ; Save the bottom of the next va
2442 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2443 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2444 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2445 and r3,r3,r6 ; Clear return if the mapping is being removed
2446 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2447 mtlr r0 ; Restore the return
2448 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2449 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2450 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2451 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2452 lwz r1,0(r1) ; Pop the stack
2453 blr ; Leave...
2454
2455 .align 5
2456
2457 hfmBadLock: li r3,1 ; Set lock time out error code
2458 b hfmReturn ; Leave....
2459
2460 hfmPanic: lis r0,hi16(Choke) ; System abend
2461 ori r0,r0,lo16(Choke) ; System abend
2462 li r3,failMapping ; Show that we failed some kind of mapping thing
2463 sc
2464
2465
2466 /*
2467 * void hw_clear_maps(void)
2468 *
2469 * Remove all mappings for all phys entries.
2470 *
2471 *
2472 */
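/*
 *	A rough, illustrative C rendering of the walk below; hypothetical stand-in
 *	types and helpers. The code only clears the mpHValid bit in each mapping's
 *	cached PTE pointer; nothing is freed:
 *
 *		void hw_clear_maps_sketch(void) {
 *			for (mem_region_t *mr = pmap_mem_regions; mr->mrPhysTab != NULL; mr++) {
 *				unsigned count = mr->mrEnd - mr->mrStart + 1;
 *				physent_t *pp = mr->mrPhysTab;
 *				for (unsigned i = 0; i < count; i++, pp++) {
 *					for (mapping_t *mp = alias_chain_head(pp); mp != NULL; mp = mp->mpAlias)
 *						mp->mpPte &= ~mpHValid;       // invalidate the quick PTE pointer
 *				}
 *			}
 *		}
 */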
2473
2474 .align 5
2475 .globl EXT(hw_clear_maps)
2476
2477 LEXT(hw_clear_maps)
2478 mflr r10 ; Save the link register
2479 mfcr r9 ; Save the condition register
2480 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2481
2482 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2483 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2484
2485 hcmNextRegion:
2486 lwz r3,mrPhysTab(r5) ; Get the actual table address
2487 lwz r0,mrStart(r5) ; Get start of table entry
2488 lwz r4,mrEnd(r5) ; Get end of table entry
2489 addi r5,r5,mrSize ; Point to the next regions
2490
2491 cmplwi r3,0 ; No more regions?
2492 beq-- hcmDone ; Leave...
2493
2494 sub r4,r4,r0 ; Calculate physical entry count
2495 addi r4,r4,1
2496 mtctr r4
2497
2498 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2499
2500
2501 hcmNextPhys32:
2502 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2503 addi r3,r3,physEntrySize ; Next phys_entry
2504
2505 hcmNextMap32:
2506 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
2507 beq hcmNoMap32 ; Did not find one...
2508
2509 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2510 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2511 stw r0,mpPte(r4) ; Get the quick pointer again
2512
2513 lwz r4,mpAlias+4(r4) ; Chain on to the next
2514 b hcmNextMap32 ; Check it out...
2515 hcmNoMap32:
2516 bdnz hcmNextPhys32
2517 b hcmNextRegion
2518
2519
2520 .align 5
2521 hcmNextPhys64:
2522 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2523 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2524 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2525 addi r3,r3,physEntrySize ; Next phys_entry
2526
2527 hcmNextMap64:
2528 andc. r4,r4,r0 ; Clean and test mapping address
2529 beq hcmNoMap64 ; Did not find one...
2530
2531 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2532 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2533 stw r0,mpPte(r4) ; Get the quick pointer again
2534
2535 ld r4,mpAlias(r4) ; Chain on to the next
2536 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2537 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2538 b hcmNextMap64 ; Check it out...
2539 hcmNoMap64:
2540 bdnz hcmNextPhys64
2541 b hcmNextRegion
2542
2543
2544 .align 5
2545 hcmDone:
2546 mtlr r10 ; Restore the return
2547 mtcr r9 ; Restore the condition register
2548 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2549 hcmDone32:
2550 mtmsr r11 ; Restore translation/mode/etc.
2551 isync
2552 blr ; Leave...
2553
2554 hcmDone64:
2555 mtmsrd r11 ; Restore translation/mode/etc.
2556 isync
2557 blr ; Leave...
2558
2559
2560
2561 /*
2562 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
2563 * walks all mappings for a physical page and performs
2564 * specified operations on each.
2565 *
2566 * pp is unlocked physent
2567 * preop is the operation to perform on the physent before the walk. This would be
2568 * used to set a cache attribute or protection.
2569 * op is the operation to perform on each mapping during the walk.
2570 * postop is the operation to perform on the physent after the walk. This would be
2571 * used to set or reset the RC bits.
2572 * opmod modifies the action taken on any connected PTEs visited during
2573 * the mapping walk.
2574 *
2575 * We return the RC bits from before postop is run.
2576 *
2577 * Note that this is designed to be called from 32-bit mode with a stack.
2578 *
2579 * We disable translation and all interruptions here. This keeps us
2580 * from having to worry about a deadlock due to having anything locked
2581 * and needing it to process a fault.
2582 *
2583 * We lock the physent, execute preop, and then walk each mapping in turn.
2584 * If there is a PTE, it is invalidated and the RC merged into the physent.
2585 * Then we call the op function.
2586 * Then we revalidate the PTE.
2587 * Once all mappings are finished, we save the physent RC and call the
2588 * postop routine. Then we unlock the physent and return the RC.
2589 *
2590 *
2591 */
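/*
 *	A rough, illustrative C rendering of the walk above; hypothetical stand-in
 *	types and helpers, with opmod selecting how connected PTEs are handled, as
 *	in the hwpQSrc/hwpMSrc/hwpSrc loops below. Note that postop only runs if
 *	the walk completes; an early bail-out skips it:
 *
 *		unsigned hw_walk_phys_sketch(physent_t *pp, hwp_op_t preop, hwp_op_t op,
 *		                             hwp_op_t postop, unsigned parm, unsigned opmod) {
 *			unsigned rc;
 *			map_phys_lock(pp);
 *			if (!preop(pp, parm)) goto early;            // preop says bail now
 *			for (mapping_t *mp = alias_chain_head(pp); mp != NULL; mp = mp->mpAlias) {
 *				pte_t *pte = NULL;
 *				if (opmod < hwpMergePTE)
 *					pte = map_inv_pte(mp);       // invalidate PTE, merge RC into physent
 *				else if (opmod == hwpMergePTE)
 *					map_merge_rc(mp);            // merge RC only, leave PTE connected
 *				int keep_going = op(mp, parm);
 *				if (pte != NULL)
 *					revalidate_pte(pte);         // restore the PTE, unlock the PTEG
 *				if (!keep_going) goto early;     // op says bail now
 *			}
 *			rc = pp->ppLink & (ppR | ppC);       // RC bits from before postop runs
 *			postop(pp, parm);
 *			map_phys_unlock(pp);
 *			return rc;
 *		early:
 *			rc = pp->ppLink & (ppR | ppC);
 *			map_phys_unlock(pp);
 *			return rc;
 *		}
 */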
2592
2593 .align 5
2594 .globl EXT(hw_walk_phys)
2595
2596 LEXT(hw_walk_phys)
2597 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2598 mflr r0 ; Save the link register
2599 stw r24,FM_ARG0+0x00(r1) ; Save a register
2600 stw r25,FM_ARG0+0x04(r1) ; Save a register
2601 stw r26,FM_ARG0+0x08(r1) ; Save a register
2602 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2603 mr r24,r8 ; Save the parm
2604 mr r25,r7 ; Save the parm
2605 stw r28,FM_ARG0+0x10(r1) ; Save a register
2606 stw r29,FM_ARG0+0x14(r1) ; Save a register
2607 stw r30,FM_ARG0+0x18(r1) ; Save a register
2608 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2609 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2610
2611 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2612
2613 mfsprg r26,0 ; (INSTRUMENTATION)
2614 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2615 addi r27,r27,1 ; (INSTRUMENTATION)
2616 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2617 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2618 slwi r12,r24,2 ; (INSTRUMENTATION)
2619 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2620 addi r27,r27,1 ; (INSTRUMENTATION)
2621 stwx r27,r26,r12 ; (INSTRUMENTATION)
2622
2623 mr r26,r11 ; Save the old MSR
2624 lis r27,hi16(hwpOpBase) ; Get high order of op base
2625 slwi r4,r4,7 ; Convert preop to displacement
2626 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2627 slwi r5,r5,7 ; Convert op to displacement
2628 add r12,r4,r27 ; Point to the preop routine
2629 slwi r28,r6,7 ; Convert postop to displacement
2630 mtctr r12 ; Set preop routine
2631 add r28,r28,r27 ; Get the address of the postop routine
2632 add r27,r5,r27 ; Get the address of the op routine
2633
2634 bl mapPhysLock ; Lock the physent
2635
2636 mr r29,r3 ; Save the physent address
2637
2638 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2639
2640 bctrl ; Call preop routine
2641 bne- hwpEarly32 ; preop says to bail now...
2642
2643 cmplwi r24,hwpMergePTE ; Classify operation modifier
2644 mtctr r27 ; Set up the op function address
2645 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2646 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2647 beq hwpMSrc32 ; Do TLB merge for each mapping
2648
2649 hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2650 beq hwpNone32 ; Did not find one...
2651
2652 bctrl ; Call the op function
2653
2654 bne- hwpEarly32 ; op says to bail now...
2655 lwz r31,mpAlias+4(r31) ; Chain on to the next
2656 b hwpQSrc32 ; Check it out...
2657
2658 .align 5
2659 hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2660 beq hwpNone32 ; Did not find one...
2661
2662 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2663 bctrl ; Call the op function
2664
2665 bne- hwpEarly32 ; op says to bail now...
2666 lwz r31,mpAlias+4(r31) ; Chain on to the next
2667 b hwpMSrc32 ; Check it out...
2668
2669 .align 5
2670 hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2671 beq hwpNone32 ; Did not find one...
2672
2673 ;
2674 ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2675 ; PTE low in R5. The PCA address is in R7. The PTEG come back locked.
2676 ; If there is no PTE, PTE low is obtained from mapping
2677 ;
2678 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2679
2680 bctrl ; Call the op function
2681
2682 crmove cr1_eq,cr0_eq ; Save the return code
2683
2684 mr. r3,r3 ; Was there a previously valid PTE?
2685 beq- hwpNxt32 ; Nope...
2686
2687 stw r5,4(r3) ; Store second half of PTE
2688 eieio ; Make sure we do not reorder
2689 stw r4,0(r3) ; Revalidate the PTE
2690
2691 eieio ; Make sure all updates come first
2692 stw r6,0(r7) ; Unlock the PCA
2693
2694 hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2695 lwz r31,mpAlias+4(r31) ; Chain on to the next
2696 b hwpSrc32 ; Check it out...
2697
2698 .align 5
2699
2700 hwpNone32: mtctr r28 ; Get the post routine address
2701
2702 lwz r30,ppLink+4(r29) ; Save the old RC
2703 mr r3,r29 ; Get the physent address
2704 bctrl ; Call post routine
2705
2706 bl mapPhysUnlock ; Unlock the physent
2707
2708 mtmsr r26 ; Restore translation/mode/etc.
2709 isync
2710
2711 b hwpReturn ; Go restore registers and return...
2712
2713 .align 5
2714
2715 hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2716 mr r3,r29 ; Get the physent address
2717 bl mapPhysUnlock ; Unlock the physent
2718
2719 mtmsr r26 ; Restore translation/mode/etc.
2720 isync
2721
2722 b hwpReturn ; Go restore registers and return...
2723
2724 .align 5
2725
2726 hwp64: bctrl ; Call preop routine
2727 bne-- hwpEarly64 ; preop says to bail now...
2728
2729 cmplwi r24,hwpMergePTE ; Classify operation modifier
2730 mtctr r27 ; Set up the op function address
2731
2732 li r24,ppLFAmask
2733 ld r31,ppLink(r3) ; Get the pointer to the first mapping
2734 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2735 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2736 beq hwpMSrc64 ; Do TLB merge for each mapping
2737
2738 hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2739 beq hwpNone64 ; Did not find one...
2740
2741 bctrl ; Call the op function
2742
2743 bne-- hwpEarly64 ; op says to bail now...
2744 ld r31,mpAlias(r31) ; Chain on to the next
2745 b hwpQSrc64 ; Check it out...
2746
2747 .align 5
2748 hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2749 beq hwpNone64 ; Did not find one...
2750
2751 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2752 bctrl ; Call the op function
2753
2754 bne-- hwpEarly64 ; op says to bail now...
2755 ld r31,mpAlias(r31) ; Chain on to the next
2756 b hwpMSrc64 ; Check it out...
2757
2758 .align 5
2759 hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2760 beq hwpNone64 ; Did not find one...
2761 ;
2762 ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2763 ; PTE low in R5. PTEG comes back locked if there is one
2764 ;
2765 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2766
2767 bctrl ; Call the op function
2768
2769 crmove cr1_eq,cr0_eq ; Save the return code
2770
2771 mr. r3,r3 ; Was there a previously valid PTE?
2772 beq-- hwpNxt64 ; Nope...
2773
2774 std r5,8(r3) ; Save bottom of PTE
2775 eieio ; Make sure we do not reorder
2776 std r4,0(r3) ; Revalidate the PTE
2777
2778 eieio ; Make sure all updates come first
2779 stw r6,0(r7) ; Unlock the PCA
2780
2781 hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2782 ld r31,mpAlias(r31) ; Chain on to the next
2783 b hwpSrc64 ; Check it out...
2784
2785 .align 5
2786
2787 hwpNone64: mtctr r28 ; Get the post routine address
2788
2789 lwz r30,ppLink+4(r29) ; Save the old RC
2790 mr r3,r29 ; Get the physent address
2791 bctrl ; Call post routine
2792
2793 bl mapPhysUnlock ; Unlock the physent
2794
2795 mtmsrd r26 ; Restore translation/mode/etc.
2796 isync
2797 b hwpReturn ; Go restore registers and return...
2798
2799 .align 5
2800
2801 hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2802 mr r3,r29 ; Get the physent address
2803 bl mapPhysUnlock ; Unlock the physent
2804
2805 mtmsrd r26 ; Restore translation/mode/etc.
2806 isync
2807
2808 hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2809 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2810 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2811 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2812 mr r3,r30 ; Pass back the RC
2813 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2814 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2815 mtlr r0 ; Restore the return
2816 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2817 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2818 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2819 lwz r1,0(r1) ; Pop the stack
2820 blr ; Leave...
2821
2822
2823 ;
2824 ; The preop/op/postop function table.
2825 ; Each function must be 128-byte aligned and be no more than
2826 ; 32 instructions. If more than 32, we must fix the address calculations
2827 ; at the start of hwpOpBase
2828 ;
2829 ; The routine must set CR0_EQ in order to continue scan.
2830 ; If CR0_EQ is not set, an early return from the function is made.
2831 ;
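;
;	Roughly, the dispatch amounts to the following C-style sketch (hypothetical
;	names; the real dispatch is the slwi/add/mtctr/bctrl sequence in hw_walk_phys):
;
;		/* every function occupies a 128-byte slot starting at hwpOpBase */
;		typedef int (*hwp_fn_t)(void);                   /* "true" == CR0_EQ set, keep going */
;		hwp_fn_t fn = (hwp_fn_t)((char *)hwpOpBase + (index << 7));
;		if (!fn())                                       /* CR0_EQ clear...                  */
;			bail_out_early();                            /* ...means stop the scan           */
;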
2832
2833 .align 7
2834
2835 hwpOpBase:
2836
2837 ; Function 0 - No operation
2838
2839 hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2840 blr ; Just return...
2841
2842 .align 5
2843
2844 ; This is the continuation of function 4 - Set attributes in mapping
2845
2846 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2847 ; NOTE: Do we have to deal with i-cache here?
2848
2849 hwpSAM: li r11,4096 ; Get page size
2850
2851 hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2852 dcbf r11,r5 ; Flush the line in the data cache
2853 bgt++ hwpSAMinvd ; Go do the rest of it...
2854
2855 sync ; Make sure it is done
2856
2857 li r11,4096 ; Get page size
2858
2859 hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2860 icbi r11,r5 ; Flush the line in the icache
2861 bgt++ hwpSAMinvi ; Go do the rest of it...
2862
2863 sync ; Make sure it is done
2864
2865 cmpw r0,r0 ; Make sure we return CR0_EQ
2866 blr ; Return...
2867
2868
2869 ; Function 1 - Set protection in physent (obsolete)
2870
2871 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2872
2873 hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
2874 blr ; Return...
2875
2876
2877 ; Function 2 - Set protection in mapping
2878
2879 ; NOTE: Changes to no-execute permission are ignored
2880
2881 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
2882
2883 hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2884 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2885 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2886 li r0,lo16(mpPP) ; Get protection bits
2887 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2888 rlwinm r2,r25,0,mpPP ; Isolate new protection bits
2889 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2890 andc r5,r5,r0 ; Clear the old prot bits
2891 or r5,r5,r2 ; Move in the new prot bits
2892 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2893 cmpw r0,r0 ; Make sure we return CR0_EQ
2894 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2895 blr ; Leave...
2896
2897 ; Function 3 - Set attributes in physent
2898
2899 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
2900
2901 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2902
2903 hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2904 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
2905 stwcx. r4,r5,r29 ; Try to stuff it
2906 bne-- hwpSAtrPhX ; Try again...
2907 ; Note: CR0_EQ is set because of stwcx.
2908 blr ; Return...
2909
2910 ; Function 4 - Set attributes in mapping
2911
2912 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2913
2914 hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2915 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2916 li r2,mpM ; Force on coherent
2917 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2918 li r0,lo16(mpWIMG) ; Get wimg mask
2919 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2920 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2921 ; Copy in the cache inhibited bit
2922 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2923 andc r5,r5,r0 ; Clear the old wimg
2924 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2925 ; Copy in the guarded bit
2926 mfsprg r9,2 ; Feature flags
2927 or r5,r5,r2 ; Move in the new wimg
2928 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2929 lwz r2,mpPAddr(r31) ; Get the physical address
2930 li r0,0xFFF ; Start a mask
2931 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2932 rlwinm r5,r0,0,1,0 ; Copy to top half
2933 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2934 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2935 and r5,r5,r2 ; Clean stuff in top 32 bits
2936 andc r2,r2,r0 ; Clean bottom too
2937 rlwimi r5,r2,0,0,31 ; Insert low 32 to make full physical address
2938 b hwpSAM ; Join common
2939
2940 ; NOTE: we moved the remainder of the code out of here because it
2941 ; did not fit in the 128 bytes allotted. It got stuck into the free space
2942 ; at the end of the no-op function.
2943
2944
2945
2946
2947 ; Function 5 - Clear reference in physent
2948
2949 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
2950
2951 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2952
2953 hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2954 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2955 stwcx. r4,r5,r29 ; Try to stuff it
2956 bne-- hwpCRefPhX ; Try again...
2957 ; Note: CR0_EQ is set because of stwcx.
2958 blr ; Return...
2959
2960
2961 ; Function 6 - Clear reference in mapping
2962
2963 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
2964
2965 hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2966 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2967 andc r5,r5,r0 ; Clear in PTE copy
2968 andc r8,r8,r0 ; and in the mapping
2969 cmpw r0,r0 ; Make sure we return CR0_EQ
2970 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2971 blr ; Return...
2972
2973
2974 ; Function 7 - Clear change in physent
2975
2976 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
2977
2978 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2979
2980 hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2981 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2982 stwcx. r4,r5,r29 ; Try to stuff it
2983 bne-- hwpCCngPhX ; Try again...
2984 ; Note: CR0_EQ is set because of stwcx.
2985 blr ; Return...
2986
2987
2988 ; Function 8 - Clear change in mapping
2989
2990 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2991
2992 hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2993 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2994 andc r5,r5,r0 ; Clear in PTE copy
2995 andc r8,r8,r0 ; and in the mapping
2996 cmpw r0,r0 ; Make sure we return CR0_EQ
2997 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2998 blr ; Return...
2999
3000
3001 ; Function 9 - Set reference in physent
3002
3003 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
3004
3005 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3006
3007 hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
3008 ori r4,r4,lo16(ppR) ; Set the reference
3009 stwcx. r4,r5,r29 ; Try to stuff it
3010 bne-- hwpSRefPhX ; Try again...
3011 ; Note: CR0_EQ is set because of stwcx.
3012 blr ; Return...
3013
3014
3015 ; Function 10 - Set reference in mapping
3016
3017 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3018
3019 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3020 ori r8,r8,lo16(mpR) ; Set reference in mapping
3021 cmpw r0,r0 ; Make sure we return CR0_EQ
3022 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3023 blr ; Return...
3024
3025 ; Function 11 - Set change in physent
3026
3027 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
3028
3029 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3030
3031 hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3032 ori r4,r4,lo16(ppC) ; Set the change bit
3033 stwcx. r4,r5,r29 ; Try to stuff it
3034 bne-- hwpSCngPhX ; Try again...
3035 ; Note: CR0_EQ is set because of stwcx.
3036 blr ; Return...
3037
3038 ; Function 12 - Set change in mapping
3039
3040 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
3041
3042 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3043 ori r8,r8,lo16(mpC) ; Set change in mapping
3044 cmpw r0,r0 ; Make sure we return CR0_EQ
3045 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3046 blr ; Return...
3047
3048 ; Function 13 - Test reference in physent
3049
3050 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3051
3052 hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3053 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3054 blr ; Return (CR0_EQ set to continue if reference is off)...
3055
3056
3057 ; Function 14 - Test reference in mapping
3058
3059 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
3060
3061 hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3062 blr ; Return (CR0_EQ set to continue if reference is off)...
3063
3064
3065 ; Function 15 - Test change in physent
3066
3067 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
3068
3069 hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3070 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
3071 blr ; Return (CR0_EQ set to continue if change is off)...
3072
3073
3074 ; Function 16 - Test change in mapping
3075
3076 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
3077
3078 hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
3079 blr ; Return (CR0_EQ set to continue if change is off)...
3080
3081
3082 ; Function 17 - Test reference and change in physent
3083
3084 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3085
3086 hwpTRefCngPhy:
3087 lwz r0,ppLink+4(r29) ; Get the flags from physent
3088 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3089 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3090 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3091 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3092
3093
3094 ; Function 18 - Test reference and change in mapping
3095
3096 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3097 hwpTRefCngMap:
3098 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3099 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3100 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3101 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3102
3103
3104 ; Function 19 - Clear reference and change in physent
3105
3106 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3107 hwpCRefCngPhy:
3108 li r5,ppLink+4 ; Get offset for flag part of physent
3109
3110 hwpCRefCngPhX:
3111 lwarx r4,r5,r29 ; Get the old flags
3112 andc r4,r4,r25 ; Clear R and C as specified by mask
3113 stwcx. r4,r5,r29 ; Try to stuff it
3114 bne-- hwpCRefCngPhX ; Try again...
3115 ; Note: CR0_EQ is set because of stwcx.
3116 blr ; Return...
3117
3118
3119 ; Function 20 - Clear reference and change in mapping
3120
3121 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3122 hwpCRefCngMap:
3123 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3124 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3125 andc r5,r5,r0 ; Clear in PTE copy
3126 andc r8,r8,r0 ; and in the mapping
3127 cmpw r0,r0 ; Make sure we return CR0_EQ
3128 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3129 blr ; Return...
3130
3131
3132 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
3133
3134 ;
3135 ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
3136 ;
3137 ; Returns:
3138 ; mapRtOK - if all is ok
3139 ; mapRtBadLk - if mapping lock fails
3140 ; mapRtPerm - if mapping is permanent
3141 ; mapRtNotFnd - if mapping is not found
3142 ; mapRtBlock - if mapping is a block
3143 ;
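;
; For illustration, a rough C sketch of how a caller might drive hw_protect and
; interpret the return codes listed above. The mapRt* names come from this file;
; the prototype spelling, the types, and the helper name are assumptions.
;
;   extern unsigned int hw_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva);
;
;   static kern_return_t protect_one_mapping(pmap_t pmap, addr64_t va, vm_prot_t prot)
;   {
;       addr64_t next = 0;                        /* receives the next mapped va */
;       switch (hw_protect(pmap, va, prot, &next)) {
;       case mapRtOK:      return KERN_SUCCESS;   /* protection changed */
;       case mapRtNotFnd:  return KERN_SUCCESS;   /* nothing mapped at va */
;       case mapRtPerm:                           /* permanent mapping, cannot change */
;       case mapRtBlock:                          /* block mapping, not handled here */
;       case mapRtBadLk:                          /* search lock timed out */
;       default:           return KERN_FAILURE;
;       }
;   }
;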
3144 .align 5
3145 .globl EXT(hw_protect)
3146
3147 LEXT(hw_protect)
3148 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3149 mflr r0 ; Save the link register
3150 stw r24,FM_ARG0+0x00(r1) ; Save a register
3151 stw r25,FM_ARG0+0x04(r1) ; Save a register
3152 mr r25,r7 ; Remember address of next va
3153 stw r26,FM_ARG0+0x08(r1) ; Save a register
3154 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3155 stw r28,FM_ARG0+0x10(r1) ; Save a register
3156 mr r24,r6 ; Save the new protection flags
3157 stw r29,FM_ARG0+0x14(r1) ; Save a register
3158 stw r30,FM_ARG0+0x18(r1) ; Save a register
3159 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3160 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3161
3162 #if DEBUG
3163 lwz r11,pmapFlags(r3) ; Get pmaps flags
3164 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3165 bne hpPanic ; Call not valid for guest shadow assist pmap
3166 #endif
3167
3168 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3169 lwz r7,pmapvr+4(r3) ; Get the second part
3170
3171
3172 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3173
3174 mr r27,r11 ; Remember the old MSR
3175 mr r26,r12 ; Remember the feature bits
3176
3177 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3178
3179 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3180
3181 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3182
3183 hpSF1: mr r29,r4 ; Save top half of vaddr
3184 mr r30,r5 ; Save the bottom half
3185
3186 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3187 bl sxlkShared ; Go get a shared lock on the mapping lists
3188 mr. r3,r3 ; Did we get the lock?
3189 bne-- hpBadLock ; Nope...
3190
3191 mr r3,r28 ; get the pmap address
3192 mr r4,r29 ; Get bits 0:31 to look for
3193 mr r5,r30 ; Get bits 32:64
3194
3195 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
3196
3197 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3198 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3199 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3200 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3201 mr. r31,r3 ; Save the mapping if we found it
3202 mr r29,r4 ; Save next va high half
3203 mr r30,r5 ; Save next va low half
3204
3205 beq-- hpNotFound ; Not found...
3206
3207 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
3208
3209 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3210
3211 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3212
3213 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
3214 mr. r3,r3 ; Was there a previously valid PTE?
3215
3216 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3217
3218 beq-- hpNoOld32 ; Nope...
3219
3220 stw r5,4(r3) ; Store second half of PTE
3221 eieio ; Make sure we do not reorder
3222 stw r4,0(r3) ; Revalidate the PTE
3223
3224 eieio ; Make sure all updates come first
3225 stw r6,0(r7) ; Unlock PCA
3226
3227 hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3228 bl sxlkUnlock ; Unlock the search list
3229
3230 li r3,mapRtOK ; Set normal return
3231 b hpR32 ; Join common...
3232
3233 .align 5
3234
3235
3236 hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3237
3238 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
3239 mr. r3,r3 ; Was there a previously valid PTE?
3240
3241 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3242
3243 beq-- hpNoOld64 ; Nope...
3244
3245 std r5,8(r3) ; Store second half of PTE
3246 eieio ; Make sure we do not reorder
3247 std r4,0(r3) ; Revalidate the PTE
3248
3249 eieio ; Make sure all updates come first
3250 stw r6,0(r7) ; Unlock PCA
3251
3252 hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3253 bl sxlkUnlock ; Unlock the search list
3254
3255 li r3,mapRtOK ; Set normal return
3256 b hpR64 ; Join common...
3257
3258 .align 5
3259
3260 hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3261
3262 hpR32: mtmsr r27 ; Restore enables/translation/etc.
3263 isync
3264 b hpReturnC ; Join common...
3265
3266 hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3267 isync
3268
3269 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3270 stw r30,4(r25) ; Save the bottom of the next va
3271 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3272 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3273 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3274 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3275 mtlr r0 ; Restore the return
3276 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3277 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3278 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3279 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3280 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3281 lwz r1,0(r1) ; Pop the stack
3282 blr ; Leave...
3283
3284 .align 5
3285
3286 hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3287 b hpReturn ; Leave....
3288
3289 hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3290 bl sxlkUnlock ; Unlock the search list
3291
3292 li r3,mapRtNotFnd ; Set that we did not find the requested page
3293 b hpReturn ; Leave....
3294
3295 hpNotAllowed:
3296 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3297 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3298 bne-- hpNotFound ; Yeah...
3299 bl sxlkUnlock ; Unlock the search list
3300
3301 li r3,mapRtBlock ; Assume it was a block
3302 rlwinm r0,r7,0,mpType ; Isolate mapping type
3303 cmplwi r0,mpBlock ; Is this a block mapping?
3304 beq++ hpReturn ; Yes, leave...
3305
3306 li r3,mapRtPerm ; Set that we hit a permanent page
3307 b hpReturn ; Leave....
3308
3309 hpPanic: lis r0,hi16(Choke) ; System abend
3310 ori r0,r0,lo16(Choke) ; System abend
3311 li r3,failMapping ; Show that we failed some kind of mapping thing
3312 sc
3313
3314
3315 ;
3316 ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3317 ;
3318 ; Returns following code ORed with RC from mapping
3319 ; mapRtOK - if all is ok
3320 ; mapRtBadLk - if mapping lock fails
3321 ; mapRtNotFnd - if mapping is not found
3322 ;
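;
; For illustration, a rough sketch of consuming hw_test_rc's result: the low
; bits carry the mapRt* status and the mapping's R/C bits are ORed in above
; them. mpR and mpC are the bit names used in this file; the status-mask name
; below is only an assumption for the example.
;
;   unsigned int rc = hw_test_rc(pmap, va, TRUE);   /* TRUE also clears R and C */
;   unsigned int status     = rc & MAP_RET_MASK;    /* mapRtOK, mapRtBadLk, mapRtNotFnd */
;   boolean_t    referenced = (rc & mpR) != 0;      /* page was referenced */
;   boolean_t    changed    = (rc & mpC) != 0;      /* page was modified */
;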
3323 .align 5
3324 .globl EXT(hw_test_rc)
3325
3326 LEXT(hw_test_rc)
3327 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3328 mflr r0 ; Save the link register
3329 stw r24,FM_ARG0+0x00(r1) ; Save a register
3330 stw r25,FM_ARG0+0x04(r1) ; Save a register
3331 stw r26,FM_ARG0+0x08(r1) ; Save a register
3332 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3333 stw r28,FM_ARG0+0x10(r1) ; Save a register
3334 mr r24,r6 ; Save the reset request
3335 stw r29,FM_ARG0+0x14(r1) ; Save a register
3336 stw r30,FM_ARG0+0x18(r1) ; Save a register
3337 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3338 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3339
3340 #if DEBUG
3341 lwz r11,pmapFlags(r3) ; Get pmaps flags
3342 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3343 bne htrPanic ; Call not valid for guest shadow assist pmap
3344 #endif
3345
3346 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3347 lwz r7,pmapvr+4(r3) ; Get the second part
3348
3349
3350 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3351
3352 mr r27,r11 ; Remember the old MSR
3353 mr r26,r12 ; Remember the feature bits
3354
3355 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3356
3357 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
3358
3359 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3360
3361 htrSF1: mr r29,r4 ; Save top half of vaddr
3362 mr r30,r5 ; Save the bottom half
3363
3364 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3365 bl sxlkShared ; Go get a shared lock on the mapping lists
3366 mr. r3,r3 ; Did we get the lock?
3367 li r25,0 ; Clear RC
3368 bne-- htrBadLock ; Nope...
3369
3370 mr r3,r28 ; get the pmap address
3371 mr r4,r29 ; Get bits 0:31 to look for
3372 mr r5,r30 ; Get bits 32:64
3373
3374 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
3375
3376 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3377 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3378 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3379 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3380 mr. r31,r3 ; Save the mapping if we found it
3381 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
3382
3383 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
3384
3385 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3386
3387 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3388
3389 cmplwi cr1,r24,0 ; Do we want to clear RC?
3390 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3391 mr. r3,r3 ; Was there a previously valid PTE?
3392 li r0,lo16(mpR|mpC) ; Get bits to clear
3393
3394 and r25,r5,r0 ; Save the RC bits
3395 beq++ cr1,htrNoClr32 ; Nope...
3396
3397 andc r12,r12,r0 ; Clear mapping copy of RC
3398 andc r5,r5,r0 ; Clear PTE copy of RC
3399 sth r12,mpVAddr+6(r31) ; Set the new RC
3400
3401 htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
3402
3403 sth r5,6(r3) ; Store updated RC
3404 eieio ; Make sure we do not reorder
3405 stw r4,0(r3) ; Revalidate the PTE
3406
3407 eieio ; Make sure all updates come first
3408 stw r6,0(r7) ; Unlock PCA
3409
3410 htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3411 bl sxlkUnlock ; Unlock the search list
3412 li r3,mapRtOK ; Set normal return
3413 b htrR32 ; Join common...
3414
3415 .align 5
3416
3417
3418 htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3419
3420 cmplwi cr1,r24,0 ; Do we want to clear RC?
3421 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3422 mr. r3,r3 ; Was there a previously valid PTE?
3423 li r0,lo16(mpR|mpC) ; Get bits to clear
3424
3425 and r25,r5,r0 ; Save the RC bits
3426 beq++ cr1,htrNoClr64 ; Nope...
3427
3428 andc r12,r12,r0 ; Clear mapping copy of RC
3429 andc r5,r5,r0 ; Clear PTE copy of RC
3430 sth r12,mpVAddr+6(r31) ; Set the new RC
3431
3432 htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3433
3434 sth r5,14(r3) ; Store updated RC
3435 eieio ; Make sure we do not reorder
3436 std r4,0(r3) ; Revalidate the PTE
3437
3438 eieio ; Make sure all updates come first
3439 stw r6,0(r7) ; Unlock PCA
3440
3441 htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3442 bl sxlkUnlock ; Unlock the search list
3443 li r3,mapRtOK ; Set normal return
3444 b htrR64 ; Join common...
3445
3446 .align 5
3447
3448 htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
3449
3450 htrR32: mtmsr r27 ; Restore enables/translation/etc.
3451 isync
3452 b htrReturnC ; Join common...
3453
3454 htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3455 isync
3456
3457 htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3458 or r3,r3,r25 ; Send the RC bits back
3459 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3460 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3461 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3462 mtlr r0 ; Restore the return
3463 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3464 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3465 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3466 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3467 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3468 lwz r1,0(r1) ; Pop the stack
3469 blr ; Leave...
3470
3471 .align 5
3472
3473 htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3474 b htrReturn ; Leave....
3475
3476 htrNotFound:
3477 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3478 bl sxlkUnlock ; Unlock the search list
3479
3480 li r3,mapRtNotFnd ; Set that we did not find the requested page
3481 b htrReturn ; Leave....
3482
3483 htrPanic: lis r0,hi16(Choke) ; System abend
3484 ori r0,r0,lo16(Choke) ; System abend
3485 li r3,failMapping ; Show that we failed some kind of mapping thing
3486 sc
3487
3488
3489 ;
3490 ;
3491 ; mapFindLockPN - find and lock physent for a given page number
3492 ;
3493 ;
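;
; For illustration, a rough C equivalent of the scan below: walk
; pmap_mem_regions until a bank whose mrStart..mrEnd range covers the page
; number is found, then index that bank's physent table (8 bytes per entry); a
; zero mrPhysTab pointer ends the list. Structure and field spellings are
; approximations of what the assembly reaches by offset; the routine then
; falls into mapPhysLock to lock the physent it found.
;
;   static struct phys_entry *find_physent(ppnum_t pn)
;   {
;       struct mem_region *mr = &pmap_mem_regions[0];
;       for (; mr->mrPhysTab != NULL; mr++) {
;           if (pn >= mr->mrStart && pn <= mr->mrEnd)
;               return &mr->mrPhysTab[pn - mr->mrStart];  /* physent for this page */
;       }
;       return NULL;                                      /* page is not managed RAM */
;   }
;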
3494 .align 5
3495 mapFindLockPN:
3496 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3497 mr r2,r3 ; Save our target
3498 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3499
3500 mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3501 lwz r5,mrStart(r9) ; Get start of table entry
3502 lwz r0,mrEnd(r9) ; Get end of table entry
3503 addi r9,r9,mrSize ; Point to the next slot
3504 cmplwi cr7,r3,0 ; Are we at the end of the table?
3505 cmplw r2,r5 ; See if we are in this table
3506 cmplw cr1,r2,r0 ; Check end also
3507 sub r4,r2,r5 ; Calculate index to physical entry
3508 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
3509 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3510 slwi r4,r4,3 ; Get offset to physical entry
3511
3512 blt-- mapFLPNitr ; Did not find it...
3513
3514 add r3,r3,r4 ; Point right to the slot
3515 b mapPhysLock ; Join common lock code
3516
3517 mapFLPNmiss:
3518 li r3,0 ; Show that we did not find it
3519 blr ; Leave...
3520
3521
3522 ;
3523 ; mapPhysFindLock - find physent list and lock it
3524 ; R31 points to mapping
3525 ;
3526 .align 5
3527
3528 mapPhysFindLock:
3529 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3530 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3531 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
3532 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3533 add r3,r3,r4 ; Point to table entry
3534 lwz r5,mpPAddr(r31) ; Get physical page number
3535 lwz r7,mrStart(r3) ; Get the start of range
3536 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3537 sub r6,r5,r7 ; Get index to physent
3538 rlwinm r6,r6,3,0,28 ; Get offset to physent
3539 add r3,r3,r6 ; Point right to the physent
3540 b mapPhysLock ; Join in the lock...
3541
3542 ;
3543 ; mapPhysLock - lock a physent list
3544 ; R3 contains list header
3545 ;
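;
; For illustration, a rough C model of the lock protocol below. Bit 0 of the
; physent's ppLink word is the lock; try an atomic test-and-set, and on
; contention spin reading (without holding a reservation, as mapPhysLockS/T
; do) until the bit clears, then retry. The GCC atomics stand in for the
; lwarx/stwcx. sequence and are not kernel calls.
;
;   static void phys_lock(volatile uint32_t *pplink)
;   {
;       for (;;) {
;           uint32_t old = *pplink;
;           if (!(old & 0x80000000U) &&
;               __atomic_compare_exchange_n(pplink, &old, old | 0x80000000U,
;                                           0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
;               return;                        /* locked (the assembly issues isync here) */
;           while (*pplink & 0x80000000U)      /* spin until the lock looks free */
;               ;
;       }
;   }
;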
3546 .align 5
3547
3548 mapPhysLockS:
3549 li r2,lgKillResv ; Get a spot to kill reservation
3550 stwcx. r2,0,r2 ; Kill it...
3551
3552 mapPhysLockT:
3553 lwz r2,ppLink(r3) ; Get physent chain header
3554 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3555 bne-- mapPhysLockT ; Nope, still locked...
3556
3557 mapPhysLock:
3558 lwarx r2,0,r3 ; Get the lock
3559 rlwinm. r0,r2,0,0,0 ; Is it locked?
3560 oris r0,r2,0x8000 ; Set the lock bit
3561 bne-- mapPhysLockS ; It is locked, spin on it...
3562 stwcx. r0,0,r3 ; Try to stuff it back...
3563 bne-- mapPhysLock ; Collision, try again...
3564 isync ; Clear any speculations
3565 blr ; Leave...
3566
3567
3568 ;
3569 ; mapPhysUnlock - unlock a physent list
3570 ; R3 contains list header
3571 ;
3572 .align 5
3573
3574 mapPhysUnlock:
3575 lwz r0,ppLink(r3) ; Get physent chain header
3576 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3577 eieio ; Make sure unlock comes last
3578 stw r0,ppLink(r3) ; Unlock the list
3579 blr
3580
3581 ;
3582 ; mapPhysMerge - merge the RC bits into the master copy
3583 ; R3 points to the physent
3584 ; R4 contains the RC bits
3585 ;
3586 ; Note: we just return if RC is 0
3587 ;
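;
; For illustration, a rough sketch of the merge below: an atomic OR of the R/C
; bits into the low word of ppLink, skipped when both bits are zero. The
; builtin stands in for the lwarx/stwcx. loop.
;
;   static void phys_merge_rc(volatile uint32_t *pplink_lo, uint32_t rc_bits)
;   {
;       if (rc_bits == 0)
;           return;                                       /* nothing to merge */
;       __atomic_fetch_or(pplink_lo, rc_bits, __ATOMIC_RELAXED);
;   }
;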
3588 .align 5
3589
3590 mapPhysMerge:
3591 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3592 la r5,ppLink+4(r3) ; Point to the RC field
3593 beqlr-- ; Leave if RC is 0...
3594
3595 mapPhysMergeT:
3596 lwarx r6,0,r5 ; Get the RC part
3597 or r6,r6,r4 ; Merge in the RC
3598 stwcx. r6,0,r5 ; Try to stuff it back...
3599 bne-- mapPhysMergeT ; Collision, try again...
3600 blr ; Leave...
3601
3602 ;
3603 ; Sets the physent link pointer and preserves all flags
3604 ; The list is locked
3605 ; R3 points to physent
3606 ; R4 has link to set
3607 ;
3608
3609 .align 5
3610
3611 mapPhyCSet32:
3612 la r5,ppLink+4(r3) ; Point to the link word
3613
3614 mapPhyCSetR:
3615 lwarx r2,0,r5 ; Get the link and flags
3616 rlwimi r4,r2,0,ppFlags ; Insert the flags
3617 stwcx. r4,0,r5 ; Stick them back
3618 bne-- mapPhyCSetR ; Someone else did something, try again...
3619 blr ; Return...
3620
3621 .align 5
3622
3623 mapPhyCSet64:
3624 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3625 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
3626
3627 mapPhyCSet64x:
3628 ldarx r2,0,r3 ; Get the link and flags
3629 and r5,r2,r0 ; Isolate the flags
3630 or r6,r4,r5 ; Add them to the link
3631 stdcx. r6,0,r3 ; Stick them back
3632 bne-- mapPhyCSet64x ; Someone else did something, try again...
3633 blr ; Return...
3634
3635 ;
3636 ; mapBumpBusy - increment the busy count on a mapping
3637 ; R3 points to mapping
3638 ;
3639
3640 .align 5
3641
3642 mapBumpBusy:
3643 lwarx r4,0,r3 ; Get mpBusy
3644 addis r4,r4,0x0100 ; Bump the busy count
3645 stwcx. r4,0,r3 ; Save it back
3646 bne-- mapBumpBusy ; This did not work, try again...
3647 blr ; Leave...
3648
3649 ;
3650 ; mapDropBusy - decrement the busy count on a mapping
3651 ; R3 points to mapping
3652 ;
3653
3654 .globl EXT(mapping_drop_busy)
3655 .align 5
3656
3657 LEXT(mapping_drop_busy)
3658 mapDropBusy:
3659 lwarx r4,0,r3 ; Get mpBusy
3660 addis r4,r4,0xFF00 ; Drop the busy count
3661 stwcx. r4,0,r3 ; Save it back
3662 bne-- mapDropBusy ; This did not work, try again...
3663 blr ; Leave...
3664
3665 ;
3666 ; mapDrainBusy - drain the busy count on a mapping
3667 ; R3 points to mapping
3668 ; Note: we already have a busy for ourselves. Only one
3669 ; busy per processor is allowed, so we just spin here
3670 ; waiting for the count to drop to 1.
3671 ; Also, the mapping can not be on any lists when we do this
3672 ; so all we are doing is waiting until it can be released.
3673 ;
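;
; For illustration, a rough C model of the busy-count protocol shared by
; mapBumpBusy, mapDropBusy, and mapDrainBusy: the count lives in the high byte
; of mpFlags (the addis of 0x0100 is +1 there). The GCC atomics stand in for
; the lwarx/stwcx. loops and are assumptions for the sketch only.
;
;   #define MP_BUSY_ONE 0x01000000U              /* one busy, in the high byte of mpFlags */
;
;   static void map_bump_busy(volatile uint32_t *mpflags)
;   {   __atomic_fetch_add(mpflags, MP_BUSY_ONE, __ATOMIC_RELAXED); }
;
;   static void map_drop_busy(volatile uint32_t *mpflags)
;   {   __atomic_fetch_sub(mpflags, MP_BUSY_ONE, __ATOMIC_RELAXED); }
;
;   static void map_drain_busy(volatile uint32_t *mpflags)
;   {
;       while (((*mpflags >> 24) & 0xFF) != 1)   /* wait until only our own busy remains */
;           ;
;   }
;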
3674
3675 .align 5
3676
3677 mapDrainBusy:
3678 lwz r4,mpFlags(r3) ; Get mpBusy
3679 rlwinm r4,r4,8,24,31 ; Clean it up
3680 cmplwi r4,1 ; Is it just our busy?
3681 beqlr++ ; Yeah, it is clear...
3682 b mapDrainBusy ; Try again...
3683
3684
3685
3686 ;
3687 ; handleDSeg - handle a data segment fault
3688 ; handleISeg - handle an instruction segment fault
3689 ;
3690 ; All that we do here is to map these to DSI or ISI and ensure
3691 ; that the hash bit is not set. This forces the fault code
3692 ; to also handle the missing segment.
3693 ;
3694 ; At entry R2 contains per_proc, R13 contains savarea pointer,
3695 ; and R11 is the exception code.
3696 ;
3697
3698 .align 5
3699 .globl EXT(handleDSeg)
3700
3701 LEXT(handleDSeg)
3702
3703 li r11,T_DATA_ACCESS ; Change fault to DSI
3704 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3705 b EXT(handlePF) ; Join common...
3706
3707 .align 5
3708 .globl EXT(handleISeg)
3709
3710 LEXT(handleISeg)
3711
3712 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3713 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3714 b EXT(handlePF) ; Join common...
3715
3716
3717 /*
3718 * handlePF - handle a page fault interruption
3719 *
3720 * At entry R2 contains per_proc, R13 contains savarea pointer,
3721 * and R11 is the exception code.
3722 *
3723 * This first part does a quick check to see if we can handle the fault.
3724 * We cannot handle any kind of protection exceptions here, so we pass
3725 * them up to the next level.
3726 *
3727 * NOTE: In order for a page-fault redrive to work, the translation miss
3728 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3729 * before we come here.
3730 */
3731
3732 .align 5
3733 .globl EXT(handlePF)
3734
3735 LEXT(handlePF)
3736
3737 mfsprg r12,2 ; Get feature flags
3738 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3739 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3740 mtcrf 0x02,r12 ; move pf64Bit to cr6
3741 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3742 lwz r18,SAVflags(r13) ; Get the flags
3743
3744 beq-- gotIfetch ; We have an IFETCH here...
3745
3746 lwz r27,savedsisr(r13) ; Get the DSISR
3747 lwz r29,savedar(r13) ; Get the first half of the DAR
3748 lwz r30,savedar+4(r13) ; And second half
3749
3750 b ckIfProt ; Go check if this is a protection fault...
3751
3752 gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3753 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3754 lwz r30,savesrr0+4(r13) ; And second half
3755 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3756
3757 ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3758 li r20,64 ; Set a limit of 64 nests for sanity check
3759 bne-- hpfExit ; Yes... (probably not though)
3760
3761 ;
3762 ; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3763 ; should be loading the user pmap here.
3764 ;
3765
3766 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3767 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3768 mr r19,r2 ; Remember the per_proc
3769 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3770 mr r23,r30 ; Save the low part of faulting address
3771 beq-- hpfInKern ; Skip if we are in the kernel
3772 la r8,ppUserPmap(r19) ; Point to the current user pmap
3773
3774 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3775
3776 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3777
3778 ;
3779 ; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3780 ; predefined value that corresponds to no address space. When we see that value
3781 ; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3782 ; cause the proper SR to be loaded.
3783 ;
3784
3785 lwz r28,4(r8) ; Pick up the pmap
3786 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3787 mr r25,r28 ; Save the original pmap (in case we nest)
3788 lwz r0,pmapFlags(r28) ; Get pmap's flags
3789 bne hpfGVtest ; Segs are not ours if so...
3790 mfsrin r4,r30 ; Get the SR that was used for translation
3791 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3792 bne++ hpfGVtest ; No...
3793
3794 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3795 b hpfGVtest ; Join on up...
3796
3797 .align 5
3798
3799 nop ; Push hpfNest to a 32-byte boundary
3800 nop ; Push hpfNest to a 32-byte boundary
3801 nop ; Push hpfNest to a 32-byte boundary
3802
3803 hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3804 mr r25,r28 ; Save the original pmap (in case we nest)
3805 lwz r0,pmapFlags(r28) ; Get pmap's flags
3806
3807 hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3808 bne hpfGVxlate ; Yup, do accelerated shadow stuff
3809
3810 ;
3811 ; This is where we loop descending nested pmaps
3812 ;
3813
3814 hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3815 addi r20,r20,-1 ; Count nest try
3816 bl sxlkShared ; Go get a shared lock on the mapping lists
3817 mr. r3,r3 ; Did we get the lock?
3818 bne-- hpfBadLock ; Nope...
3819
3820 mr r3,r28 ; Get the pmap pointer
3821 mr r4,r22 ; Get top of faulting vaddr
3822 mr r5,r23 ; Get bottom of faulting vaddr
3823 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3824
3825 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3826 mr. r31,r3 ; Save the mapping if we found it
3827 cmplwi cr1,r0,0 ; Check for removal
3828 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3829
3830 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3831
3832 rlwinm r0,r7,0,mpType ; Isolate mapping type
3833 cmplwi r0,mpNest ; Are we again nested?
3834 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3835 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
3836 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3837
3838 lhz r21,mpSpace(r31) ; Get the space
3839
3840 bne++ hpfFoundIt ; No, we found our guy...
3841
3842
3843 #if pmapTransSize != 12
3844 #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3845 #endif
3846 cmplwi r0,mpLinkage ; Linkage mapping?
3847 cmplwi cr1,r20,0 ; Too many nestings?
3848 beq-- hpfSpclNest ; Do we need to do special handling?
3849
3850 hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3851 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3852 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3853 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3854 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3855 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3856 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3857 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3858 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3859 slwi r11,r21,3 ; Multiply space by 8
3860 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3861 addc r23,r23,r9 ; Relocate bottom half of vaddr
3862 lwz r10,0(r10) ; Get the actual translation map
3863 slwi r12,r21,2 ; Multiply space by 4
3864 add r10,r10,r11 ; Add in the higher part of the index
3865 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3866 adde r22,r22,r8 ; Relocate the top half of the vaddr
3867 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3868 bl sxlkUnlock ; Unlock the search list
3869
3870 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
3871 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3872 cmplwi r28,0 ; Is the pmap paddr valid?
3873 bne+ hpfNest ; Nest into new pmap...
3874 b hpfBadPmap ; Handle bad pmap
3875
3876 hpfGetPmap64:
3877 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3878 cmpldi r28,0 ; Is the pmap paddr valid?
3879 bne++ hpfNest ; Nest into new pmap...
3880 b hpfBadPmap ; Handle bad pmap
3881
3882
3883 ;
3884 ; Error condition. We only allow 64 nestings. This keeps us from having to
3885 ; check for recursive nests when we install them.
3886 ;
3887
3888 .align 5
3889
3890 hpfNestTooMuch:
3891 lwz r20,savedsisr(r13) ; Get the DSISR
3892 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3893 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3894 ori r20,r20,1 ; Indicate that there was a nesting problem
3895 stw r20,savedsisr(r13) ; Stash it
3896 lwz r11,saveexception(r13) ; Restore the exception code
3897 b EXT(PFSExit) ; Yes... (probably not though)
3898
3899 ;
3900 ; Error condition - lock failed - this is fatal
3901 ;
3902
3903 .align 5
3904
3905 hpfBadLock:
3906 lis r0,hi16(Choke) ; System abend
3907 ori r0,r0,lo16(Choke) ; System abend
3908 li r3,failMapping ; Show mapping failure
3909 sc
3910
3911 ;
3912 ; Error condition - space id selected an invalid pmap - fatal
3913 ;
3914
3915 .align 5
3916
3917 hpfBadPmap:
3918 lis r0,hi16(Choke) ; System abend
3919 ori r0,r0,lo16(Choke) ; System abend
3920 li r3,failPmap ; Show invalid pmap
3921 sc
3922
3923 ;
3924 ; Did not find any kind of mapping
3925 ;
3926
3927 .align 5
3928
3929 hpfNotFound:
3930 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3931 bl sxlkUnlock ; Unlock it
3932 lwz r11,saveexception(r13) ; Restore the exception code
3933
3934 hpfExit: ; We need this because we can not do a relative branch
3935 b EXT(PFSExit) ; Yes... (probably not though)
3936
3937
3938 ;
3939 ; Here is where we handle special mappings. So far, the only use is to load a
3940 ; processor specific segment register for copy in/out handling.
3941 ;
3942 ; The only (so far implemented) special map is used for copyin/copyout.
3943 ; We keep a mapping of a "linkage" mapping in the per_proc.
3944 ; The linkage mapping is basically a nested pmap that is switched in
3945 ; as part of context switch. It relocates the appropriate user address
3946 ; space slice into the right place in the kernel.
3947 ;
3948
3949 .align 5
3950
3951 hpfSpclNest:
3952 la r31,ppUMWmp(r19) ; Just point to the mapping
3953 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
3954 b hpfCSrch ; Go continue search...
3955
3956
3957 ;
3958 ; We have now found a mapping for the address we faulted on.
3959 ;
3960
3961 ;
3962 ; Here we go about calculating what the VSID should be. We concatenate
3963 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3964 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3965 ; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3966 ; the VSID.
3967 ;
3968 ; This is used both for segment handling and PTE handling
3969 ;
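;
; For illustration, a rough C rendering of the VSID calculation described
; above: the space ID replicated three times, XORed with the 36-bit segment
; number shifted up by 14. The helper name and exact shifts are assumptions,
; and the key/cache formatting done later is omitted.
;
;   static uint64_t vsid_for(uint32_t space /* 14-bit space id */, uint64_t ea)
;   {
;       uint64_t esid   = ea >> 28;                  /* EA bits 0:35, the segment number */
;       uint64_t spaces = ((uint64_t)space << 28) |  /* space ID concatenated 3 times */
;                         ((uint64_t)space << 14) |
;                          (uint64_t)space;
;       return (esid << 14) ^ spaces;                /* the VSID */
;   }
;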
3970
3971
3972 #if maxAdrSpb != 14
3973 #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3974 #endif
3975
3976 ; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3977 ; when a multi-level mapping has been successfully searched):
3978 ; r21: home space id number
3979 ; r22: relocated high-order 32 bits of vaddr
3980 ; r23: relocated low-order 32 bits of vaddr
3981 ; r25: pmap physical address
3982 ; r27: dsisr
3983 ; r28: home pmap physical address
3984 ; r29: high-order 32 bits of faulting vaddr
3985 ; r30: low-order 32 bits of faulting vaddr
3986 ; r31: mapping's physical address
3987
3988 .align 5
3989
3990 hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3991 hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3992 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3993 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3994 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3995 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
3996 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3997 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3998 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3999 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
4000 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
4001 xor r14,r14,r20 ; Calculate the top half of VSID
4002 xor r15,r15,r21 ; Calculate the bottom half of the VSID
4003 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
4004 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
4005 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
4006 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
4007 or r12,r12,r15 ; Add key into the bottom of VSID
4008 ;
4009 ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4010
4011 bne++ hpfPteMiss ; Nope, normal PTE miss...
4012
4013 ;
4014 ; Here is the only place that we make an entry in the pmap segment cache.
4015 ;
4016 ; Note that we do not make an entry in the segment cache for special
4017 ; nested mappings. This makes the copy in/out segment get refreshed
4018 ; when switching threads.
4019 ;
4020 ; The first thing that we do is to look up the ESID we are going to load
4021 ; into a segment in the pmap cache. If it is already there, this is
4022 ; a segment that appeared since the last time we switched address spaces.
4023 ; If all is correct, then it was another processor that made the cache
4024 ; entry. If not, well, it is an error that we should die on, but I have
4025 ; not figured a good way to trap it yet.
4026 ;
4027 ; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4028 ; an entry based on the generation number, update the cache entry, and
4029 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4030 ; entries that correspond to the last 4 bits (32:35 for 64-bit and
4031 ; 0:3 for 32-bit) of the ESID.
4032 ;
4033 ; Then we unlock and bail.
4034 ;
4035 ; First lock it. Then select a free slot or steal one based on the generation
4036 ; number. Then store it, update the allocation flags, and unlock.
4037 ;
4038 ; The cache entry contains an image of the ESID/VSID pair we would load for
4039 ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4040 ;
4041 ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4042 ; the current one, which may have changed because we nested.
4043 ;
4044 ; Also remember that we do not store the valid bit in the ESID. If we
4045 ; do, this will break some other stuff.
4046 ;
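;
; For illustration, a rough sketch of the sub-tag update performed below: the
; two pmapSCSubTag words form 16 four-bit entries, one per segment-cache slot,
; each recording the low nybble of the ESID cached in that slot, with slot 0
; in the top nybble of the first word. The helper name is an assumption.
;
;   static void subtag_set(uint32_t subtag[2], unsigned slot, uint32_t esid_nybble)
;   {
;       unsigned word  = slot >> 3;                  /* 8 nybbles per 32-bit word */
;       unsigned shift = 28 - ((slot & 7) * 4);      /* big-endian nybble position */
;       subtag[word] = (subtag[word] & ~(0xFU << shift)) |
;                      ((esid_nybble & 0xFU) << shift);
;   }
;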
4047
4048 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4049
4050 mr r3,r25 ; Point to the pmap
4051 mr r4,r29 ; ESID high half
4052 mr r5,r30 ; ESID low half
4053 bl pmapCacheLookup ; Go see if this is in the cache already
4054
4055 mr. r3,r3 ; Did we find it?
4056 mr r4,r11 ; Copy this to a different register
4057
4058 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4059
4060 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4061 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4062
4063 cntlzw r7,r4 ; Find a free slot
4064
4065 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4066 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4067 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4068 addi r5,r4,1 ; Bump the generation number
4069 and r7,r7,r6 ; Clear bit number if none empty
4070 andc r8,r4,r6 ; Clear generation count if we found an empty
4071 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4072 or r7,r7,r8 ; Select a slot number
4073 li r8,0 ; Clear
4074 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4075 oris r8,r8,0x8000 ; Get the high bit on
4076 la r9,pmapSegCache(r25) ; Point to the segment cache
4077 slwi r6,r7,4 ; Get index into the segment cache
4078 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4079 srw r8,r8,r7 ; Get the mask
4080 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4081 li r0,0 ; Clear
4082 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4083 oris r0,r0,0xF000 ; Get the sub-tag mask
4084 add r9,r9,r6 ; Point to the cache slot
4085 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4086 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4087
4088 stw r29,sgcESID(r9) ; Save the top of the ESID
4089 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4090 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4091 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4092 or r10,r10,r5 ; Stick in subtag in case top half
4093 or r11,r11,r5 ; Stick in subtag in case bottom half
4094 stw r14,sgcVSID(r9) ; Save the top of the VSID
4095 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4096 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4097 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4098
4099 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4100 b hpfNoCacheEnt ; Go finish up...
4101
4102 hpfSCSTbottom:
4103 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4104
4105
4106 hpfNoCacheEnt:
4107 eieio ; Make sure cache is updated before lock
4108 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4109
4110
4111 hpfNoCacheEnt2:
4112 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4113 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4114
4115 ;
4116 ; Make and enter 32-bit segment register
4117 ;
4118
4119 lwz r16,validSegs(r19) ; Get the valid SR flags
4120 xor r12,r12,r4 ; Alter the storage key before loading segment register
4121 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4122 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4123 lis r0,0x8000 ; Set bit 0
4124 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4125 srw r0,r0,r2 ; Get bit corresponding to SR
4126 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4127 or r16,r16,r0 ; Show that SR is valid
4128
4129 mtsrin r6,r30 ; Set the actual SR
4130
4131 stw r16,validSegs(r19) ; Set the valid SR flags
4132
4133 b hpfPteMiss ; SR loaded, go do a PTE...
4134
4135 ;
4136 ; Make and enter 64-bit segment look-aside buffer entry.
4137 ; Note that the cache entry is the right format except for valid bit.
4138 ; We also need to convert from long long to 64-bit register values.
4139 ;
4140
4141
4142 .align 5
4143
4144 hpfLoadSeg64:
4145 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4146 sldi r8,r29,32 ; Move high order address over
4147 sldi r10,r14,32 ; Move high part of VSID over
4148
4149 not r3,r16 ; Make valids be 0s
4150 li r0,1 ; Prepare to set bit 0
4151
4152 cntlzd r17,r3 ; Find a free SLB
4153 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4154 or r9,r8,r30 ; Form full 64-bit address
4155 cmplwi r17,63 ; Did we find a free SLB entry?
4156 sldi r0,r0,63 ; Get bit 0 set
4157 or r10,r10,r12 ; Move in low part and keys
4158 addi r17,r17,1 ; Skip SLB 0 always
4159 blt++ hpfFreeSeg ; Yes, go load it...
4160
4161 ;
4162 ; No free SLB entries, select one that is in use and invalidate it
4163 ;
4164 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4165 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4166 addi r4,r4,1 ; Set next slot to steal
4167 slbmfee r7,r17 ; Get the entry that is in the selected spot
4168 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4169 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4170 srawi r2,r2,31 ; Get -1 if steal index still in range
4171 slbie r7 ; Invalidate the in-use SLB entry
4172 and r4,r4,r2 ; Reset steal index when it should wrap
4173 isync ;
4174
4175 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4176 ;
4177 ; We are now ready to stick the SLB entry in the SLB and mark it in use
4178 ;
4179
4180 hpfFreeSeg:
4181 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4182 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4183 srd r0,r0,r4 ; Set bit mask for allocation
4184 oris r9,r9,0x0800 ; Turn on the valid bit
4185 or r16,r16,r0 ; Turn on the allocation flag
4186 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4187
4188 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4189 slbie r7 ; Blow away a potential duplicate
4190
4191 hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4192
4193 std r16,validSegs(r19) ; Mark as valid
4194 b hpfPteMiss ; STE loaded, go do a PTE...
4195
4196 ;
4197 ; The segment has been set up and loaded if need be. Now we are ready to build the
4198 ; PTE and get it into the hash table.
4199 ;
4200 ; Note that there is actually a race here. If we start fault processing on
4201 ; a different pmap, i.e., we have descended into a nested pmap, it is possible
4202 ; that the nest could have been removed from the original pmap. We would
4203 ; succeed with this translation anyway. I do not think we need to worry
4204 ; about this (famous last words) because nobody should be unnesting anything
4205 ; if there are still people actively using them. It should be up to the
4206 ; higher level VM system to put the kibosh on this.
4207 ;
4208 ; There is also another race here: if we fault on the same mapping on more than
4209 ; one processor at the same time, we could end up with multiple PTEs for the same
4210 ; mapping. This is not a good thing.... We really only need one of the
4211 ; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4212 ; the mapping. If we see that set, we just abandon the handler and hope that by
4213 ; the time we restore context and restart the interrupted code, the fault has
4214 ; been resolved by the other guy. If not, we will take another fault.
4215 ;
4216
4217 ;
4218 ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4219 ; It is required to stay there until after we call mapSelSlot!!!!
4220 ;
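;
; For illustration, a rough C model of the mpFIP handshake implemented at
; hpfPteMiss below: atomically set the fault-in-progress flag, but abandon the
; fault if the flag was already set or the mapping already has a valid PTE
; pointer. mpFIP and mpHValid are this file's names; the CAS builtin stands in
; for the lwarx/stwcx. loop and is an assumption for the sketch.
;
;   static boolean_t claim_fault(volatile uint32_t *mpflags, uint32_t mppte)
;   {
;       uint32_t old = *mpflags;
;       do {
;           if ((old & mpFIP) || (mppte & mpHValid))
;               return FALSE;             /* other CPU is on it, or PTE already there */
;       } while (!__atomic_compare_exchange_n(mpflags, &old, old | mpFIP,
;                                             0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
;       return TRUE;                      /* we own the fault; go build the PTE */
;   }
;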
4221
4222 .align 5
4223
4224 hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4225 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4226 li r3,mpHValid ; Get the PTE valid bit
4227 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4228 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4229 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4230 and. r12,r12,r3 ; Isolate the valid bit
4231 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4232 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
4233 rlwinm r0,r2,0,mpType ; Isolate mapping type
4234 cmplwi r0,mpBlock ; Is this a block mapping?
4235 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
4236 stwcx. r2,0,r31 ; Store the flags
4237 bne-- hpfPteMiss ; Collision, try again...
4238
4239 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4240
4241 ;
4242 ; At this point we are about to do the 32-bit PTE generation.
4243 ;
4244 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4245 ;
4246 ; 1 2 3 4 4 5 6
4247 ; 0 8 6 4 2 0 8 6 3
4248 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4249 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4250 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4251 ;
4252 ; The 24 bits of the 32-bit architecture VSID is in the following:
4253 ;
4254 ; 1 2 3 4 4 5 6
4255 ; 0 8 6 4 2 0 8 6 3
4256 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4257 ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4258 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4259 ;
4260
4261
4262 hpfBldPTE32:
4263 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4264 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4265
4266 mfsdr1 r27 ; Get the hash table base address
4267
4268 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4269 rlwinm r18,r23,10,26,31 ; Extract the API
4270 xor r19,r15,r0 ; Calculate hash << 12
4271 mr r2,r25 ; Save the flag part of the mapping
4272 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4273 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4274 rlwinm r25,r25,0,0,19 ; Clear out the flags
4275 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4276 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4277 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4278 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4279 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4280 add r24,r24,r25 ; Adjust to true physical address
4281 rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image
4282 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4283 and r19,r19,r16 ; Wrap hash table offset into the hash table
4284 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4285 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4286 add r19,r19,r27 ; Point to the PTEG
4287 subfic r20,r20,-4 ; Get negative offset to PCA
4288 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4289 add r20,r20,r27 ; Point to the PCA slot
4290
4291 ;
4292 ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4293 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4294 ;
4295 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4296 ; that some other processor beat us and stuck in a PTE or that
4297 ; all we had was a simple segment exception and the PTE was there the whole time.
4298 ; If we find a pointer, we are done.
4299 ;
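;
; For illustration, a rough C model of the 32-bit PTEG/PCA addressing computed
; above: the primary hash is the low 19 bits of the VSID XORed with the 16-bit
; page index, the PTEG sits at hash*64 wrapped by the SDR1 size mask, and the
; software PCA entries (4 bytes per PTEG) grow downward from the hash table
; base. Names and the exact mask handling are assumptions for the sketch.
;
;   static void pteg_addrs(uint32_t htaborg, uint32_t sizemask, uint32_t vsid,
;                          uint32_t ea, uint32_t *pteg, uint32_t *pca)
;   {
;       uint32_t hash = (vsid & 0x7FFFF) ^ ((ea >> 12) & 0xFFFF);  /* primary hash */
;       uint32_t off  = (hash << 6) & sizemask;                    /* 64-byte PTEGs, wrapped */
;       *pteg = htaborg + off;                                     /* the PTEG */
;       *pca  = htaborg - 4 - (off >> 4);                          /* its PCA entry */
;   }
;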
4300
4301 mr r7,r20 ; Copy the PCA pointer
4302 bl mapLockPteg ; Lock the PTEG
4303
4304 lwz r12,mpPte(r31) ; Get the offset to the PTE
4305 mr r17,r6 ; Remember the PCA image
4306 mr r16,r6 ; Prime the post-select PCA image
4307 andi. r0,r12,mpHValid ; Is there a PTE here already?
4308 li r21,8 ; Get the number of slots
4309
4310 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4311
4312 bne- hpfBailOut ; Someone already did this for us...
4313
4314 ;
4315 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4316 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4317 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4318 ; R4 returns the slot index.
4319 ;
4320 ; REMEMBER: CR7 indicates that we are building a block mapping.
4321 ;
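;
; For illustration, a rough sketch of the mapSelSlot contract as used below;
; the enum and helper are assumptions, not kernel definitions. Only a stolen
; normal PTE has a physent whose R/C bits must be merged; an empty slot has no
; victim and a stolen block PTE has no physent.
;
;   enum sel_slot { SLOT_EMPTY = 0, SLOT_STOLE_PTE = 1, SLOT_STOLE_BLOCK = 2 };
;
;   static int must_merge_victim_rc(enum sel_slot r)
;   {
;       return r == SLOT_STOLE_PTE;       /* merge R/C into the victim's physent */
;   }
;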
4322
4323 hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4324 mr r6,r17 ; Get back the original PCA
4325 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4326 blt- hpfBailOut ; Holy Cow, all slots are locked...
4327
4328 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4329
4330 cmplwi cr5,r3,1 ; Did we steal a slot?
4331 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4332 mr r16,r6 ; Remember the PCA image after selection
4333 blt+ cr5,hpfInser32 ; Nope, no steal...
4334
4335 lwz r6,0(r19) ; Get the old PTE
4336 lwz r7,4(r19) ; Get the real part of the stealee
4337 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4338 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4339 srwi r3,r7,12 ; Change phys address to a ppnum
4340 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4341 cmplwi cr1,r3,0 ; Check if this is in RAM
4342 bne- hpfNoPte32 ; Could not get it, try for another...
4343
4344 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4345
4346 hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4347
4348 sync ; Make sure the invalid is stored
4349 li r9,tlbieLock ; Get the TLBIE lock
4350 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4351
4352 hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4353 mfsprg r4,0 ; Get the per_proc
4354 rlwinm r8,r6,25,18,31 ; Extract the space ID
4355 rlwinm r11,r6,25,18,31 ; Extract the space ID
4356 lwz r7,hwSteals(r4) ; Get the steal count
4357 srwi r2,r6,7 ; Align segment number with hash
4358 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4359 mr. r0,r0 ; Is it locked?
4360 srwi r0,r19,6 ; Align PTEG offset for back hash
4361 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4362 xor r11,r11,r0 ; Hash backwards to partial vaddr
4363 rlwinm r12,r2,14,0,3 ; Shift segment up
4364 mfsprg r2,2 ; Get feature flags
4365 li r0,1 ; Get our lock word
4366 rlwimi r12,r6,22,4,9 ; Move up the API
4367 bne- hpfTLBIE32 ; It is locked, go wait...
4368 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4369
4370 stwcx. r0,0,r9 ; Try to get it
4371 bne- hpfTLBIE32 ; We was beat...
4372 addi r7,r7,1 ; Bump the steal count
4373
4374 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4375 li r0,0 ; Lock clear value
4376
4377 tlbie r12 ; Invalidate it everywhere
4378
4379
4380 beq- hpfNoTS32 ; Can not have MP on this machine...
4381
4382 eieio ; Make sure that the tlbie happens first
4383 tlbsync ; Wait for everyone to catch up
4384 sync ; Make sure of it all
4385
4386 hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4387
4388 stw r7,hwSteals(r4) ; Save the steal count
4389 bgt cr5,hpfInser32 ; We just stole a block mapping...
4390
4391 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4392
4393 la r11,ppLink+4(r3) ; Point to the master RC copy
4394 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4395 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4396
4397 hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4398 or r0,r0,r2 ; Merge in the new RC
4399 stwcx. r0,0,r11 ; Try to stick it back
4400 bne- hpfMrgRC32 ; Try again if we collided...
4401
4402
4403 hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4404 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4405
4406 lhz r10,mpSpace(r7) ; Get the space
4407 lwz r9,mpVAddr+4(r7) ; And the vaddr
4408 cmplw cr1,r10,r8 ; Is this one of ours?
4409 xor r9,r12,r9 ; Compare virtual address
4410 cmplwi r9,0x1000 ; See if we really match
4411 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4412 beq+ hpfFPnch2 ; Yes, found ours...
4413
4414 lwz r7,mpAlias+4(r7) ; Chain on to the next
4415 b hpfFPnch ; Check it out...
4416
4417 hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4418 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4419 bl mapPhysUnlock ; Unlock the physent now
4420
4421 hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4422
4423 stw r24,4(r19) ; Stuff in the real part of the PTE
4424 eieio ; Make sure this gets there first
4425
4426 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4427 mr r17,r16 ; Get the PCA image to save
4428 b hpfFinish ; Go join the common exit code...
4429
4430
4431 ;
4432 ; At this point we are about to do the 64-bit PTE generation.
4433 ;
4434 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4435 ;
4436 ; 1 2 3 4 4 5 6
4437 ; 0 8 6 4 2 0 8 6 3
4438 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4439 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4440 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4441 ;
4442 ;
4443
4444 .align 5
4445
4446 hpfBldPTE64:
4447 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4448 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4449
4450 mfsdr1 r27 ; Get the hash table base address
4451
4452 sldi r11,r22,32 ; Slide top of adjusted EA over
4453 sldi r14,r14,32 ; Slide top of VSID over
4454 rlwinm r5,r27,0,27,31 ; Isolate the size
4455 eqv r16,r16,r16 ; Get all foxes here
4456 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4457 mr r2,r10 ; Save the flag part of the mapping
4458 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4459 rldicr r27,r27,0,45 ; Clean up the hash table base
4460 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4461 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4462 subfic r5,r5,46 ; Get number of leading zeros
4463 xor r19,r0,r15 ; Calculate hash
4464 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4465 srd r16,r16,r5 ; Shift over to get length of table
4466 srdi r19,r19,5 ; Convert page offset to hash table offset
4467 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4468 rldicr r10,r10,0,51 ; Clear out flags
4469 sldi r24,r24,12 ; Change ppnum to physical address
4470 sub r11,r11,r10 ; Get the offset from the base mapping
4471 and r19,r19,r16 ; Wrap into hash table
4472 add r24,r24,r11 ; Get actual physical address of this page
4473 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4474 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4475 subfic r20,r20,-4 ; Get negative offset to PCA
4476 ori r24,r24,lo16(mpR) ; Force on the reference bit
4477 add r20,r20,r27 ; Point to the PCA slot
4478 add r19,r19,r27 ; Point to the PTEG
4479
4480 ;
4481 ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4482 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4483 ;
4484 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4485 ; that some other processor beat us and stuck in a PTE or that
4486 ; all we had was a simple segment exception and the PTE was there the whole time.
4487 ; If we find a valid pointer, we are done.
4488 ;
4489
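;
;			Illustrative sketch, not part of the build: the re-check described above is the
;			usual "test, lock, test again" pattern.  Roughly, in C, with mapping_t, pteg_lock(),
;			pteg_unlock() and MP_HVALID standing in for the real mapping structure, mapLockPteg
;			and the mpHValid bit:
;
;				int pte_already_there(mapping_t *mp)
;				{
;					pteg_lock(mp);				/* serialize against other processors */
;					if (mp->mpPte & MP_HVALID) {		/* a PTE showed up while we were not looking, */
;						pteg_unlock(mp);		/* or it was there all along (segment-only fault) */
;						return 1;			/* nothing left to build */
;					}
;					return 0;				/* caller builds the PTE, still holding the PTEG lock */
;				}
;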
4490 mr r7,r20 ; Copy the PCA pointer
4491 bl mapLockPteg ; Lock the PTEG
4492
4493 lwz r12,mpPte(r31) ; Get the offset to the PTE
4494 mr r17,r6 ; Remember the PCA image
4495 mr r18,r6 ; Prime post-selection PCA image
4496 andi. r0,r12,mpHValid ; See if we have a PTE now
4497 li r21,8 ; Get the number of slots
4498
4499 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4500
4501 bne-- hpfBailOut ; Someone already did this for us...
4502
4503 ;
4504 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
4505 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4506 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4507 ; R4 returns the slot index.
4508 ;
4509 ; REMEMBER: CR7 indicates that we are building a block mapping.
4510 ;
4511
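;
;			Restated as a C sketch (not part of the build; the enum and struct are invented
;			shorthand for the register convention described above):
;
;				enum sel_result { SEL_FREE = 0, SEL_STOLE_PTE = 1, SEL_STOLE_BLOCK = 2 };
;
;				struct sel_pick {
;					enum sel_result what;		/* R3 on return: how the slot was obtained */
;					unsigned int    slot;		/* R4 on return: index of the PTEG slot to use */
;				};
;				/* input: R3 = pointer to the PCA of the PTEG being filled */
;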
4512 hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4513 mr r6,r17 ; Restore original state of PCA
4514 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4515 blt- hpfBailOut ; Holy Cow, all slots are locked...
4516
4517 bl mapSelSlot ; Go select a slot
4518
4519 cmplwi cr5,r3,1 ; Did we steal a slot?
4520 mr r18,r6 ; Remember the PCA image after selection
4521 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4522 lwz r10,hwSteals(r2) ; Get the steal count
4523 blt++ cr5,hpfInser64 ; Nope, no steal...
4524
4525 ld r6,0(r19) ; Get the old PTE
4526 ld r7,8(r19) ; Get the real part of the stealee
4527 rldicr r6,r6,0,62 ; Clear the valid bit
4528 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4529 srdi r3,r7,12 ; Convert physical address to a physical page number
4530 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4531 cmplwi cr1,r3,0 ; Check if this is in RAM
4532 bne-- hpfNoPte64 ; Could not get it, try for another...
4533
4534 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4535
4536 hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4537 li r9,tlbieLock ; Get the TLBIE lock
4538
4539 srdi r11,r6,5 ; Shift VSID over for back hash
4540 mfsprg r4,0 ; Get the per_proc
4541 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4542 sync ; Make sure the invalid is stored
4543
4544 sldi r12,r6,16 ; Move AVPN to EA position
4545 sldi r11,r11,5 ; Move this to the page position
4546
4547 hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4548 mr. r0,r0 ; Is it locked?
4549 li r0,1 ; Get our lock word
4550 bne-- hpfTLBIE65 ; It is locked, go wait...
4551
4552 stwcx. r0,0,r9 ; Try to get it
4553 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4554 rldicl r8,r6,52,50 ; Isolate the address space ID
4555 bne-- hpfTLBIE64 ; We was beat...
4556 addi r10,r10,1 ; Bump the steal count
4557
4558 rldicl r11,r12,0,16 ; Clear cause the book says so
4559 li r0,0 ; Lock clear value
4560
4561 tlbie r11 ; Invalidate it everywhere
4562
4563 mr r7,r8 ; Get a copy of the space ID
4564 eieio ; Make sure that the tlbie happens first
4565 rldimi r7,r7,14,36 ; Copy address space to make hash value
4566 tlbsync ; Wait for everyone to catch up
4567 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4568 srdi r2,r6,26 ; Shift original segment down to bottom
4569
4570 ptesync ; Make sure of it all
4571 xor r7,r7,r2 ; Compute original segment
4572 stw r0,tlbieLock(0) ; Clear the tlbie lock
4573
4574 stw r10,hwSteals(r4) ; Save the steal count
4575 bgt cr5,hpfInser64 ; We just stole a block mapping...
4576
4577 rldimi r12,r7,28,0 ; Insert decoded segment
4578 rldicl r4,r12,0,13 ; Trim to max supported address
4579
4580 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4581
4582 la r11,ppLink+4(r3) ; Point to the master RC copy
4583 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4584 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4585
4586 hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4587 li r12,ppLFAmask ; Get mask to clean up alias pointer
4588 or r0,r0,r2 ; Merge in the new RC
4589 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4590 stwcx. r0,0,r11 ; Try to stick it back
4591 bne-- hpfMrgRC64 ; Try again if we collided...
4592
4593 hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4594 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4595
4596 lhz r10,mpSpace(r7) ; Get the space
4597 ld r9,mpVAddr(r7) ; And the vaddr
4598 cmplw cr1,r10,r8 ; Is this one of ours?
4599 xor r9,r4,r9 ; Compare virtual address
4600 cmpldi r9,0x1000 ; See if we really match
4601 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4602 beq++ hpfFPnch2x ; Yes, found ours...
4603
4604 ld r7,mpAlias(r7) ; Chain on to the next
4605 b hpfFPnchx ; Check it out...
4606
4607 .align 5
4608
4609 hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4610 stwcx. r7,0,r7 ; Kill reservation
4611
4612 hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4613 mr. r0,r0 ; Is it locked?
4614 beq++ hpfTLBIE64 ; It is free now, go try to grab it...
4615 b hpfTLBIE63 ; Still locked, keep checking...
4616
4617
4618
4619 hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4620 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4621 bl mapPhysUnlock ; Unlock the physent now
4622
4623
4624 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4625 eieio ; Make sure this gets there first
4626 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4627 mr r17,r18 ; Get the PCA image to set
4628 b hpfFinish ; Go join the common exit code...
4629
4630 hpfLostPhys:
4631 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4632 ori r0,r0,lo16(Choke) ; System abend
4633 sc
4634
4635 ;
4636 ; This is the common code we execute when we are finished setting up the PTE.
4637 ;
4638
4639 .align 5
4640
4641 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4642 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4643 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4644 stw r4,mpPte(r31) ; Remember our PTE
4645
4646 hpfBailOut: eieio ; Make sure all updates come first
4647 stw r17,0(r20) ; Unlock and set the final PCA
4648
4649 ;
4650 ; This is where we go if we have started processing the fault, but find that someone
4651 ; else has taken care of it.
4652 ;
4653
4654 hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4655 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4656 sth r2,mpFlags+2(r31) ; Set it
4657
4658 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4659 bl sxlkUnlock ; Unlock the search list
4660
4661 li r11,T_IN_VAIN ; Say that it was handled
4662 b EXT(PFSExit) ; Leave...
4663
4664 ;
4665 ; This is where we go when we find that someone else
4666 ; is in the process of handling the fault.
4667 ;
4668
4669 hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4670 stwcx. r3,0,r3 ; Do it
4671
4672 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4673 bl sxlkUnlock ; Unlock the search list
4674
4675 li r11,T_IN_VAIN ; Say that it was handled
4676 b EXT(PFSExit) ; Leave...
4677
4678 ;
4679 ; Guest shadow assist -- page fault handler
4680 ;
4681 ; Here we handle a fault in a guest pmap that has the guest shadow mapping
4682 ; assist active. We locate the VMM pmap extension block, which contains an
4683 ; index over the discontiguous multi-page shadow hash table. The index
4684 ; corresponding to our vaddr is selected, and the selected group within
4685 ; that page is searched for a valid and active entry that contains
4686 ; our vaddr and space id. The search is pipelined, so that we may fetch
4687 ; the next slot while examining the current slot for a hit. The final
4688 ; search iteration is unrolled so that we don't fetch beyond the end of
4689 ; our group, which could have dire consequences depending upon where the
4690 ; physical hash page is located.
4691 ;
4692 ; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4693 ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4694 ; after the pmap_vmm_ext is the hash table physical address index, a
4695 ; linear list of 64-bit physical addresses of the pages that comprise
4696 ; the hash table.
4697 ;
4698 ; In the event that we successfully locate a guest mapping, we re-join
4699 ; the page fault path at hpfGVfound with the mapping's address in r31;
4700 ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4701 ; a share of the pmap search lock for the host pmap with the host pmap's
4702 ; address in r28, the guest pmap's space id in r21, and the guest pmap's
4703 ; flags in r12.
4704 ;
4705
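;
;			Illustrative C sketch of the lookup described above (not part of the build;
;			gv_slot, hash_page_of() and group_offset() are invented stand-ins for the real
;			mapping layout and the GV_HPAGE_*/GV_HGRP_* arithmetic, while GV_SLOTS, mpgFree
;			and mpgDormant are the real names):
;
;				struct gv_slot *gv_lookup(uint64_t *hash_page_index, uint32_t space_id, uint64_t vaddr)
;				{
;					uint32_t hash = (uint32_t)(vaddr >> 12) ^ space_id;		/* shadow hash */
;					uint64_t page_pa = hash_page_index[hash_page_of(hash)];	/* hash table page for this hash */
;					struct gv_slot *grp = (struct gv_slot *)(uintptr_t)(page_pa + group_offset(hash));
;
;					for (int i = 0; i < GV_SLOTS; i++) {			/* the assembly pipelines this loop */
;						struct gv_slot *s = &grp[i];
;						if (!(s->flags & (mpgFree | mpgDormant)) &&
;						    s->space == space_id &&
;						    (s->vaddr >> 12) == (vaddr >> 12))
;							return s;			/* hit: joins hpfGVfound */
;					}
;					return NULL;					/* miss: joins hpfNotFound */
;				}
;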
4706 .align 5
4707 hpfGVxlate:
4708 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4709
4710 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4711 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4712 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4713 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4714 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4715 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4716 lwz r6,vxsGpf(r11) ; Get guest fault count
4717
4718 srwi r3,r10,12 ; Form shadow hash:
4719 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4720 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4721 ; Form index offset from hash page number
4722 add r31,r31,r4 ; r31 <- hash page index entry
4723 lwz r31,4(r31) ; r31 <- hash page paddr
4724 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4725 ; r31 <- hash group paddr
4726
4727 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4728 bl sxlkShared ; Go get a shared lock on the mapping lists
4729 mr. r3,r3 ; Did we get the lock?
4730 bne- hpfBadLock ; Nope...
4731
4732 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4733 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4734 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4735 addi r6,r6,1 ; Increment guest fault count
4736 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4737 mtctr r0 ; in this group
4738 stw r6,vxsGpf(r11) ; Update guest fault count
4739 b hpfGVlp32
4740
4741 .align 5
4742 hpfGVlp32:
4743 mr r6,r3 ; r6 <- current mapping slot's flags
4744 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4745 mr r7,r4 ; r7 <- current mapping slot's space ID
4746 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4747 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4748 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4749 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4750 xor r7,r7,r21 ; Compare space ID
4751 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4752 xor r8,r8,r10 ; Compare virtual address
4753 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4754 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4755
4756 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4757 bdnz hpfGVlp32 ; Iterate
4758
4759 clrrwi r5,r5,12 ; Remove flags from virtual address
4760 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4761 xor r4,r4,r21 ; Compare space ID
4762 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4763 xor r5,r5,r10 ; Compare virtual address
4764 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4765 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4766
4767 b hpfGVmiss
4768
4769 .align 5
4770 hpfGV64:
4771 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4772 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4773 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4774 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4775 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4776 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4777 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4778 lwz r6,vxsGpf(r11) ; Get guest fault count
4779
4780 srwi r3,r10,12 ; Form shadow hash:
4781 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4782 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4783 ; Form index offset from hash page number
4784 add r31,r31,r4 ; r31 <- hash page index entry
4785 ld r31,0(r31) ; r31 <- hash page paddr
4786 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4787 ; r31 <- hash group paddr
4788
4789 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4790 bl sxlkShared ; Go get a shared lock on the mapping lists
4791 mr. r3,r3 ; Did we get the lock?
4792 bne-- hpfBadLock ; Nope...
4793
4794 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4795 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4796 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4797 addi r6,r6,1 ; Increment guest fault count
4798 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4799 mtctr r0 ; in this group
4800 stw r6,vxsGpf(r11) ; Update guest fault count
4801 b hpfGVlp64
4802
4803 .align 5
4804 hpfGVlp64:
4805 mr r6,r3 ; r6 <- current mapping slot's flags
4806 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4807 mr r7,r4 ; r7 <- current mapping slot's space ID
4808 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4809 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4810 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4811 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4812 xor r7,r7,r21 ; Compare space ID
4813 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4814 xor r8,r8,r10 ; Compare virtual address
4815 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4816 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4817
4818 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4819 bdnz hpfGVlp64 ; Iterate
4820
4821 clrrdi r5,r5,12 ; Remove flags from virtual address
4822 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4823 xor r4,r4,r21 ; Compare space ID
4824 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4825 xor r5,r5,r10 ; Compare virtual address
4826 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4827 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4828
4829 hpfGVmiss:
4830 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4831 addi r6,r6,1 ; Increment miss count
4832 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
4833 b hpfNotFound
4834
4835 /*
4836 * hw_set_user_space(pmap)
4837 * hw_set_user_space_dis(pmap)
4838 *
4839 * Indicate whether memory space needs to be switched.
4840 * We really need to turn off interrupts here, because we need to be non-preemptable
4841 *
4842 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4843 * register usage here. The VMM switch code in vmachmon.s that calls this
4844 * knows what registers are in use. Check that code if these change.
4845 */
4846
4847
4848
4849 .align 5
4850 .globl EXT(hw_set_user_space)
4851
4852 LEXT(hw_set_user_space)
4853
4854 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4855 mfmsr r10 ; Get the current MSR
4856 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4857 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4858 andc r10,r10,r8 ; Turn off VEC, FP for good
4859 andc r9,r10,r9 ; Turn off EE also
4860 mtmsr r9 ; Disable them
4861 isync ; Make sure FP and vec are off
4862 mfsprg r6,1 ; Get the current activation
4863 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4864 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4865 mfsprg r4,2 ; Get the feature flags
4866 lwz r7,pmapvr(r3) ; Get the v to r translation
4867 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4868 mtcrf 0x80,r4 ; Get the Altivec flag
4869 xor r4,r3,r8 ; Get bottom of the real address of the pmap
4870 cmplw cr1,r3,r2 ; Same address space as before?
4871 stw r7,ppUserPmap(r6) ; Show our real pmap address
4872 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4873 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4874 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4875 mtmsr r10 ; Restore interruptions
4876 beqlr-- cr1 ; Leave if the same address space or not Altivec
4877
4878 dssall ; Need to kill all data streams if adrsp changed
4879 sync
4880 blr ; Return...
4881
4882 .align 5
4883 .globl EXT(hw_set_user_space_dis)
4884
4885 LEXT(hw_set_user_space_dis)
4886
4887 lwz r7,pmapvr(r3) ; Get the v to r translation
4888 mfsprg r4,2 ; Get the feature flags
4889 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4890 mfsprg r6,1 ; Get the current activation
4891 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4892 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4893 mtcrf 0x80,r4 ; Get the Altivec flag
4894 xor r4,r3,r8 ; Get bottom of the real address of the pmap
4895 cmplw cr1,r3,r2 ; Same address space as before?
4896 stw r7,ppUserPmap(r6) ; Show our real pmap address
4897 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4898 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4899 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4900 beqlr-- cr1 ; Leave if the same
4901
4902 dssall ; Need to kill all data streams if adrsp changed
4903 sync
4904 blr ; Return...
4905
4906 /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4907 *
4908 * Lock must already be held on mapping block list
4909 * returns 0 if all slots filled.
4910 * returns n if a slot is found and it is not the last
4911 * returns -n if a slot is found and it is the last
4912 * when n and -n are returned, the corresponding bit is cleared
4913 * the mapping is zeroed out before return
4914 *
4915 */
4916
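;
;			Illustrative C sketch of the allocation above (not part of the build; assumes
;			<stdint.h>/<string.h>, a two-word free bitmap and 64-byte (mpBasicSize) slots,
;			and that slot 0 is never free since it holds the block header):
;
;				int mapalc1_sketch(uint32_t free[2], char *block)
;				{
;					int n;
;					if (free[0])      n = __builtin_clz(free[0]);		/* first free slot, word 0 */
;					else if (free[1]) n = 32 + __builtin_clz(free[1]);	/* otherwise look in word 1 */
;					else              return 0;				/* all slots filled */
;					free[n >> 5] &= ~(0x80000000u >> (n & 31));		/* claim the slot */
;					memset(block + n * 64, 0, 64);				/* zero the mapping (the dcbz pair above) */
;					return (free[0] | free[1]) ? n : -n;			/* -n: that was the last free slot */
;				}
;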
4917 .align 5
4918 .globl EXT(mapalc1)
4919
4920 LEXT(mapalc1)
4921 lwz r4,mbfree(r3) ; Get the 1st mask
4922 lis r0,0x8000 ; Get the mask to clear the first free bit
4923 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4924 mr r12,r3 ; Save the block ptr
4925 cntlzw r3,r4 ; Get first 1-bit in 1st word
4926 srw. r9,r0,r3 ; Get bit corresponding to first free one
4927 cntlzw r10,r5 ; Get first free field in second word
4928 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4929 bne mapalc1f ; Found one in 1st word
4930
4931 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4932 li r3,0 ; assume failure return
4933 andc r5,r5,r9 ; Turn it off
4934 beqlr-- ; There are no 1 bits left...
4935 addi r3,r10,32 ; set the correct number
4936
4937 mapalc1f:
4938 or. r0,r4,r5 ; any more bits set?
4939 stw r4,mbfree(r12) ; update bitmasks
4940 stw r5,mbfree+4(r12)
4941
4942 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4943 addi r7,r6,32
4944 dcbz r6,r12 ; clear the 64-byte mapping
4945 dcbz r7,r12
4946
4947 bnelr++ ; return if another bit remains set
4948
4949 neg r3,r3 ; indicate we just returned the last bit
4950 blr
4951
4952
4953 /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4954 *
4955 * Lock must already be held on mapping block list
4956 * returns 0 if all slots filled.
4957 * returns n if a slot is found and it is not the last
4958 * returns -n if a slot is found and it is the last
4959 * when n and -n are returned, the corresponding bits are cleared
4960 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4961 * the mapping is zeroed out before return
4962 */
4963
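;
;			The n & (n << 1) trick mentioned above, as a tiny C sketch (not part of the build):
;			a bit is set in n & (n << 1) exactly where a run of two consecutive 1 bits starts,
;			so counting leading zeros of that value gives the index (from the most significant
;			bit, as cntlzw does) of the first free 2-slot run.
;
;				int first_double_run(uint32_t n)		/* -1 if no run of two 1s exists */
;				{
;					uint32_t starts = n & (n << 1);		/* bit i set => bits i and i+1 of n are set */
;					return starts ? __builtin_clz(starts) : -1;
;				}
;
;			The run that straddles the two bitmap words (bit 31 of word 0 plus bit 0 of word 1)
;			is the case handled separately at mapalc2c below.
;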
4964 .align 5
4965 .globl EXT(mapalc2)
4966 LEXT(mapalc2)
4967 lwz r4,mbfree(r3) ; Get the first mask
4968 lis r0,0x8000 ; Get the mask to clear the first free bit
4969 lwz r5,mbfree+4(r3) ; Get the second mask
4970 mr r12,r3 ; Save the block ptr
4971 slwi r6,r4,1 ; shift first word over
4972 and r6,r4,r6 ; lite start of double bit runs in 1st word
4973 slwi r7,r5,1 ; shift 2nd word over
4974 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4975 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4976 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4977 cntlzw r10,r7 ; Get first free field in second word
4978 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4979 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4980 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4981 bne mapalc2a ; Found two consecutive free bits in 1st word
4982
4983 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4984 li r3,0 ; assume failure
4985 srwi r11,r9,1 ; get mask for 2nd bit
4986 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4987 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4988 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4989 addi r3,r10,32 ; set the correct number
4990
4991 mapalc2a:
4992 or. r0,r4,r5 ; any more bits set?
4993 stw r4,mbfree(r12) ; update bitmasks
4994 stw r5,mbfree+4(r12)
4995 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4996 addi r7,r6,32
4997 addi r8,r6,64
4998 addi r9,r6,96
4999 dcbz r6,r12 ; zero out the 128-byte mapping
5000 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
5001 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
5002 dcbz r9,r12
5003
5004 bnelr++ ; return if another bit remains set
5005
5006 neg r3,r3 ; indicate we just returned the last bit
5007 blr
5008
5009 mapalc2c:
5010 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5011 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5012 beqlr ; no, we failed
5013 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5014 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5015 li r3,31 ; get index of this field
5016 b mapalc2a
5017
5018
5019 ;
5020 ; This routine initializes the hash table and PCA.
5021 ; It is done here because we may need to be 64-bit to do it.
5022 ;
5023
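;
;			Illustrative C sketch of the layout being initialized (not part of the build; the
;			32-bit case with 64-byte PTEGs is shown -- the 64-bit path below uses 128-byte PTEGs --
;			and 0xFF010000 matches the PCA image loaded below: all eight slots free, steal
;			pointer starting at the end):
;
;				void hash_init_sketch(uintptr_t hash_table_base, uint32_t hash_table_size)
;				{
;					uint32_t *pca = (uint32_t *)hash_table_base;		/* PCA entries grow downward from the base */
;					memset((void *)hash_table_base, 0, hash_table_size);	/* the PTEGs start out zeroed */
;					for (uint32_t i = 1; i <= hash_table_size / 64; i++)	/* one 4-byte PCA entry per PTEG */
;						pca[-(int)i] = 0xFF010000u;
;				}
;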
5024 .align 5
5025 .globl EXT(hw_hash_init)
5026
5027 LEXT(hw_hash_init)
5028
5029 mfsprg r10,2 ; Get feature flags
5030 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5031 mtcrf 0x02,r10 ; move pf64Bit to cr6
5032 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5033 lis r4,0xFF01 ; Set all slots free and start steal at end
5034 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5035 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5036
5037 lwz r12,0(r12) ; Get hash table size
5038 li r3,0 ; Get start
5039 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5040
5041 lwz r11,4(r11) ; Get hash table base
5042
5043 hhiNext32: cmplw r3,r12 ; Have we reached the end?
5044 bge- hhiCPCA32 ; Yes...
5045 dcbz r3,r11 ; Clear the line
5046 addi r3,r3,32 ; Next one...
5047 b hhiNext32 ; Go on...
5048
5049 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5050 li r3,-4 ; Displacement to first PCA entry
5051 neg r12,r12 ; Get negative end of PCA
5052
5053 hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5054 subi r3,r3,4 ; Next slot
5055 cmpw r3,r12 ; Have we finished?
5056 bge+ hhiNPCA32 ; Not yet...
5057 blr ; Leave...
5058
5059 hhiSF: mfmsr r9 ; Save the MSR
5060 li r8,1 ; Get a 1
5061 mr r0,r9 ; Get a copy of the MSR
5062 ld r11,0(r11) ; Get hash table base
5063 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5064 mtmsrd r0 ; Turn on SF
5065 isync
5066
5067
5068 hhiNext64: cmpld r3,r12 ; Have we reached the end?
5069 bge-- hhiCPCA64 ; Yes...
5070 dcbz128 r3,r11 ; Clear the line
5071 addi r3,r3,128 ; Next one...
5072 b hhiNext64 ; Go on...
5073
5074 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5075 li r3,-4 ; Displacement to first PCA entry
5076 neg r12,r12 ; Get negative end of PCA
5077
5078 hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5079 subi r3,r3,4 ; Next slot
5080 cmpd r3,r12 ; Have we finished?
5081 bge++ hhiNPCA64 ; Not yet...
5082
5083 mtmsrd r9 ; Turn off SF if it was off
5084 isync
5085 blr ; Leave...
5086
5087
5088 ;
5089 ; This routine sets up the hardware to start translation.
5090 ; Note that we do NOT start translation.
5091 ;
5092
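;
;			Illustrative C sketch of the SDR1 images built below (not part of the build;
;			assumes hash_table_size is a power of two and hash_table_base is aligned as the
;			architecture requires):
;
;				uint32_t sdr1_32(uint32_t base, uint32_t size)		/* HTABORG | HTABMASK */
;				{
;					return base | ((size >> 16) - 1);		/* mask counts 64KB units */
;				}
;
;				uint64_t sdr1_64(uint64_t base, uint32_t size)		/* HTABSIZE = log2(size) - 18 */
;				{
;					return base | (uint64_t)(__builtin_ctz(size) - 18);
;				}
;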
5093 .align 5
5094 .globl EXT(hw_setup_trans)
5095
5096 LEXT(hw_setup_trans)
5097
5098 mfsprg r11,0 ; Get the per_proc block
5099 mfsprg r12,2 ; Get feature flags
5100 li r0,0 ; Get a 0
5101 li r2,1 ; And a 1
5102 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5103 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5104 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5105 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5106 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5107
5108 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5109
5110 li r9,0 ; Clear out a register
5111 sync
5112 isync
5113 mtdbatu 0,r9 ; Invalidate maps
5114 mtdbatl 0,r9 ; Invalidate maps
5115 mtdbatu 1,r9 ; Invalidate maps
5116 mtdbatl 1,r9 ; Invalidate maps
5117 mtdbatu 2,r9 ; Invalidate maps
5118 mtdbatl 2,r9 ; Invalidate maps
5119 mtdbatu 3,r9 ; Invalidate maps
5120 mtdbatl 3,r9 ; Invalidate maps
5121
5122 mtibatu 0,r9 ; Invalidate maps
5123 mtibatl 0,r9 ; Invalidate maps
5124 mtibatu 1,r9 ; Invalidate maps
5125 mtibatl 1,r9 ; Invalidate maps
5126 mtibatu 2,r9 ; Invalidate maps
5127 mtibatl 2,r9 ; Invalidate maps
5128 mtibatu 3,r9 ; Invalidate maps
5129 mtibatl 3,r9 ; Invalidate maps
5130
5131 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5132 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5133 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5134 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5135 lwz r11,4(r11) ; Get hash table base
5136 lwz r12,0(r12) ; Get hash table size
5137 subi r12,r12,1 ; Back off by 1
5138 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5139
5140 mtsdr1 r11 ; Ok, we now have the hash table set up
5141 sync
5142
5143 li r12,invalSpace ; Get the invalid segment value
5144 li r10,0 ; Start low
5145
5146 hstsetsr: mtsrin r12,r10 ; Set the SR
5147 addis r10,r10,0x1000 ; Bump the segment
5148 mr. r10,r10 ; Are we finished?
5149 bne+ hstsetsr ; Nope...
5150 sync
5151 blr ; Return...
5152
5153 ;
5154 ; 64-bit version
5155 ;
5156
5157 hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5158 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5159 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5160 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5161 ld r11,0(r11) ; Get hash table base
5162 lwz r12,0(r12) ; Get hash table size
5163 cntlzw r10,r12 ; Get the number of bits
5164 subfic r10,r10,13 ; Get the extra bits we need
5165 or r11,r11,r10 ; Add the size field to SDR1
5166
5167 mtsdr1 r11 ; Ok, we now have the hash table set up
5168 sync
5169
5170 li r0,0 ; Set an SLB slot index of 0
5171 slbia ; Trash all SLB entries (except for entry 0 that is)
5172 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5173 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5174 slbie r7 ; Invalidate it
5175
5176 blr ; Return...
5177
5178
5179 ;
5180 ; This routine turns on translation for the first time on a processor
5181 ;
5182
5183 .align 5
5184 .globl EXT(hw_start_trans)
5185
5186 LEXT(hw_start_trans)
5187
5188
5189 mfmsr r10 ; Get the msr
5190 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5191
5192 mtmsr r10 ; Everything falls apart here
5193 isync
5194
5195 blr ; Back to it.
5196
5197
5198
5199 ;
5200 ; This routine validates a segment register.
5201 ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5202 ;
5203 ; r3 = virtual pmap
5204 ; r4 = segment[0:31]
5205 ; r5 = segment[32:63]
5206 ; r6 = va[0:31]
5207 ; r7 = va[32:63]
5208 ;
5209 ; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5210 ; Note that there is no reason to apply the key modifier here because this is only
5211 ; used for kernel accesses.
5212 ;
5213
5214 .align 5
5215 .globl EXT(hw_map_seg)
5216
5217 LEXT(hw_map_seg)
5218
5219 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5220 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5221 mfsprg r10,2 ; Get feature flags
5222
5223 ;
5224 ; Note: the following code would probably be easier to follow if I split it,
5225 ; but I just wanted to see if I could write this to work on both 32- and 64-bit
5226 ; machines combined.
5227 ;
5228
5229 ;
5230 ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5231 ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5232
5233 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5234 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5235 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5236 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5237 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5238 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5239 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5240 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5241 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5242 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5243
5244 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5245 ; concatenated together. There is garbage
5246 ; at the top for 64-bit but we will clean
5247 ; that out later.
5248 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5249
5250
5251 ;
5252 ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5253 ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5254 ;
5255
5256 ;
5257 ; What we have now is:
5258 ;
5259 ; 0 0 1 2 3 4 4 5 6
5260 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5261 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5262 ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5263 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5264 ; 0 0 1 2 3 - for 32-bit machines
5265 ; 0 8 6 4 1
5266 ;
5267 ; 0 0 1 2 3 4 4 5 6
5268 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5269 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5270 ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5271 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5272 ; 0 0 1 2 3 - for 32-bit machines
5273 ; 0 8 6 4 1
5274 ;
5275 ; 0 0 1 2 3 4 4 5 6
5276 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5277 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5278 ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5279 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5280 ; 0 0 1 2 3 - for 32-bit machines
5281 ; 0 8 6 4 1
5282
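;
;			Illustrative C sketch of the VSID computed here, 64-bit flavor (not part of the
;			build; the 32-bit flavor is the same idea folded into one word, and the exact bit
;			positions are in the diagrams above):
;
;				uint64_t vsid_for(uint64_t ea, uint32_t space_id)
;				{
;					uint64_t seg36 = ea >> 28;			/* ea[0:35], the 36-bit segment number */
;					uint64_t space4 = 0;
;					for (int i = 0; i < 4; i++)			/* four concatenated 14-bit copies of the space ID */
;						space4 = (space4 << 14) | (space_id & 0x3FFF);
;					return (seg36 << 14) ^ space4;			/* caller trims the unsupported high-order bits */
;				}
;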
5283
5284 xor r8,r8,r2 ; Calculate VSID
5285
5286 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5287 mfsprg r12,0 ; Get the per_proc
5288 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5289 mfmsr r6 ; Get current MSR
5290 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5291 mtmsrd r0,1 ; Set only the EE bit to 0
5292 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5293 mfmsr r11 ; Get the MSR right now, after disabling EE
5294 andc r2,r11,r2 ; Turn off translation now
5295 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5296 or r11,r11,r6 ; Turn on the EE bit if it was on
5297 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5298 isync ; Hang out a bit
5299
5300 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5301 sldi r9,r9,9 ; Position the key and noex bit
5302
5303 rldimi r5,r8,12,0 ; Form the VSID/key
5304
5305 not r3,r6 ; Make valids be 0s
5306
5307 cntlzd r7,r3 ; Find a free SLB
5308 cmplwi r7,63 ; Did we find a free SLB entry?
5309
5310 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5311
5312 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5313 addi r7,r7,1 ; Make sure we skip slb 0
5314 blt++ hmsFreeSeg ; Yes, go load it...
5315
5316 ;
5317 ; No free SLB entries, select one that is in use and invalidate it
5318 ;
5319 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5320 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5321 addi r2,r2,1 ; Set next slot to steal
5322 slbmfee r3,r7 ; Get the entry that is in the selected spot
5323 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5324 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5325 srawi r8,r8,31 ; Get -1 if steal index still in range
5326 slbie r3 ; Invalidate the in-use SLB entry
5327 and r2,r2,r8 ; Reset steal index when it should wrap
5328 isync ;
5329
5330 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5331 ;
5332 ; We are now ready to stick the SLB entry in the SLB and mark it in use
5333 ;
5334
5335 hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5336 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5337 srd r0,r0,r2 ; Set bit mask for allocation
5338 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5339 or r6,r6,r0 ; Turn on the allocation flag
5340
5341 slbmte r5,r4 ; Make that SLB entry
5342
5343 std r6,validSegs(r12) ; Mark as valid
5344 mtmsrd r11 ; Restore the MSR
5345 isync
5346 blr ; Back to it...
5347
5348 .align 5
5349
5350 hms32bit:
5351 mfsprg r12,1 ; Get the current activation
5352 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5353 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5354 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5355 lis r0,0x8000 ; Set bit 0
5356 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5357 srw r0,r0,r2 ; Get bit corresponding to SR
5358 addi r7,r12,validSegs ; Point to the valid segment flags directly
5359
5360 mtsrin r8,r4 ; Set the actual SR
5361 isync ; Need to make sure this is done
5362
5363 hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5364 or r6,r6,r0 ; Show that SR is valid
5365 stwcx. r6,0,r7 ; Set the valid SR flags
5366 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5367
5368 blr ; Back to it...
5369
5370
5371 ;
5372 ; This routine invalidates a segment register.
5373 ;
5374
5375 .align 5
5376 .globl EXT(hw_blow_seg)
5377
5378 LEXT(hw_blow_seg)
5379
5380 mfsprg r10,2 ; Get feature flags
5381 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5382
5383 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5384
5385 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5386
5387 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5388 mfmsr r6 ; Get current MSR
5389 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5390 mtmsrd r0,1 ; Set only the EE bit to 0
5391 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5392 mfmsr r11 ; Get the MSR right now, after disabling EE
5393 andc r2,r11,r2 ; Turn off translation now
5394 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5395 or r11,r11,r6 ; Turn on the EE bit if it was on
5396 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5397 isync ; Hang out a bit
5398
5399 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5400
5401 slbie r9 ; Invalidate the associated SLB entry
5402
5403 mtmsrd r11 ; Restore the MSR
5404 isync
5405 blr ; Back to it.
5406
5407 .align 5
5408
5409 hbs32bit:
5410 mfsprg r12,1 ; Get the current activation
5411 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5412 addi r7,r12,validSegs ; Point to the valid segment flags directly
5413 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5414 rlwinm r6,r9,4,28,31 ; Convert segment to number
5415 lis r2,0x8000 ; Set up a mask
5416 srw r2,r2,r6 ; Make a mask
5417 and. r0,r4,r2 ; See if this is even valid
5418 li r5,invalSpace ; Set the invalid address space VSID
5419 beqlr ; Leave if already invalid...
5420
5421 mtsrin r5,r9 ; Slam the segment register
5422 isync ; Need to make sure this is done
5423
5424 hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5425 stwcx. r4,0,r7 ; Set the valid SR flags
5426 beqlr++ ; Stored ok, no interrupt, time to leave...
5427
5428 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5429 b hbsrupt ; Try again...
5430
5431 ;
5432 ; This routine invalidates the entire pmap segment cache
5433 ;
5434 ; Translation is on, interrupts may or may not be enabled.
5435 ;
5436
5437 .align 5
5438 .globl EXT(invalidateSegs)
5439
5440 LEXT(invalidateSegs)
5441
5442 la r10,pmapCCtl(r3) ; Point to the segment cache control
5443 eqv r2,r2,r2 ; Get all foxes
5444
5445 isInv: lwarx r4,0,r10 ; Get the segment cache control value
5446 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5447 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5448 bne-- isInv0 ; Yes, try again...
5449
5450 stwcx. r4,0,r10 ; Try to invalidate it
5451 bne-- isInv ; Someone else just stuffed it...
5452 blr ; Leave...
5453
5454
5455 isInv0: li r4,lgKillResv ; Get reservation kill zone
5456 stwcx. r4,0,r4 ; Kill reservation
5457
5458 isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5459 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5460 bne-- isInv ; Nope...
5461 b isInv1 ; Still locked do it again...
5462
5463 ;
5464 ; This routine switches segment registers between kernel and user.
5465 ; We have some assumptions and rules:
5466 ; We are in the exception vectors
5467 ; pf64Bitb is set up
5468 ; R3 contains the MSR we are going to
5469 ; We can not use R4, R13, R20, R21, R29
5470 ; R13 is the savearea
5471 ; R29 has the per_proc
5472 ;
5473 ; We return R3 as 0 if we did not switch between kernel and user
5474 ; We also maintain and apply the user state key modifier used by VMM support;
5475 ; If we go to the kernel it is set to 0, otherwise it follows the bit
5476 ; in spcFlags.
5477 ;
5478
5479 .align 5
5480 .globl EXT(switchSegs)
5481
5482 LEXT(switchSegs)
5483
5484 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5485 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5486 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5487 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5488 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5489 or r2,r2,r3 ; This will be 1 if we will be using user segments
5490 li r3,0 ; Get a selection mask
5491 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5492 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5493 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5494 la r19,ppUserPmap(r29) ; Point to the current user pmap
5495
5496 ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5497 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5498
5499 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5500 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5501 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5502 or r8,r8,r19 ; Get the pointer to the pmap we are using
5503
5504 beqlr ; We are staying in the same mode, do not touch segs...
5505
5506 lwz r28,0(r8) ; Get top half of pmap address
5507 lwz r10,4(r8) ; Get bottom half
5508
5509 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5510 rlwinm r28,r28,0,1,0 ; Copy top to top
5511 stw r30,ppMapFlags(r29) ; Set the key modifier
5512 rlwimi r28,r10,0,0,31 ; Insert bottom
5513
5514 la r10,pmapCCtl(r28) ; Point to the segment cache control
5515 la r9,pmapSegCache(r28) ; Point to the segment cache
5516
5517 ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5518 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5519 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5520 bne-- ssgLock0 ; Yup, this is in use...
5521
5522 stwcx. r16,0,r10 ; Try to set the lock
5523 bne-- ssgLock ; Did we get contention?
5524
5525 not r11,r15 ; Invert the invalids to valids
5526 li r17,0 ; Set a mask for the SRs we are loading
5527 isync ; Make sure we are all caught up
5528
5529 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5530
5531 li r0,0 ; Clear
5532 slbia ; Trash all SLB entries (except for entry 0 that is)
5533 li r17,1 ; Get SLB index to load (skip slb 0)
5534 oris r0,r0,0x8000 ; Get set for a mask
5535 b ssg64Enter ; Start on a cache line...
5536
5537 .align 5
5538
5539 ssgLock0: li r15,lgKillResv ; Killing field
5540 stwcx. r15,0,r15 ; Kill reservation
5541
5542 ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5543 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5544 beq++ ssgLock ; It is free now, go try to grab it...
5545 b ssgLock1 ; Still locked, check again...
5546 ;
5547 ; This is the 32-bit address space switch code.
5548 ; We take a reservation on the segment cache and walk through.
5549 ; For each entry, we load the specified entries and remember which
5550 ; we did with a mask. Then, we figure out which segments should be
5551 ; invalid and then see which actually are. Then we load those with the
5552 ; defined invalid VSID.
5553 ; Afterwards, we unlock the segment cache.
5554 ;
5555
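;
;			Illustrative C sketch of the pass described above (not part of the build; mtsrin()
;			stands for the instruction, seg_cache_entry is an invented stand-in for the cache
;			layout, key/protection packing is simplified to a plain XOR, and invalSpace and
;			pmapSegCacheUse are the real names):
;
;				struct seg_cache_entry { uint32_t esid; uint32_t vsid; };
;
;				void switch_segs_32(struct seg_cache_entry *cache, uint32_t in_use,
;				                    uint32_t key_mod, uint32_t *valid_srs)
;				{
;					uint32_t loaded = 0;
;					for (int slot = 0; slot < pmapSegCacheUse; slot++) {
;						if (!(in_use & (0x80000000u >> slot)))
;							continue;				/* cache slot not in use */
;						uint32_t sr = cache[slot].esid >> 28;		/* which segment register */
;						mtsrin(cache[slot].vsid ^ key_mod, cache[slot].esid);
;						loaded |= 0x80000000u >> sr;			/* remember which SR we loaded */
;					}
;					uint32_t stale = *valid_srs & ~loaded;			/* valid before but not loaded now */
;					for (uint32_t sr = 0; sr < 16; sr++)
;						if (stale & (0x80000000u >> sr))
;							mtsrin(invalSpace, sr << 28);		/* stuff the invalid VSID */
;					*valid_srs = loaded;
;				}
;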
5556 .align 5
5557
5558 ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5559 cmplwi r12,pmapSegCacheUse ; See if we are done
5560 slwi r14,r12,4 ; Index to the cache slot
5561 lis r0,0x8000 ; Get set for a mask
5562 add r14,r14,r9 ; Point to the entry
5563
5564 bge- ssg32Done ; All done...
5565
5566 lwz r5,sgcESID+4(r14) ; Get the ESID part
5567 srw r2,r0,r12 ; Form a mask for the one we are loading
5568 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5569
5570 andc r11,r11,r2 ; Clear the bit
5571 lwz r6,sgcVSID(r14) ; And get the VSID top
5572
5573 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
5574
5575 xor r7,r7,r30 ; Modify the key before we actually set it
5576 srw r0,r0,r2 ; Get a mask for the SR we are loading
5577 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5578 or r17,r17,r0 ; Remember the segment
5579 rlwimi r8,r7,20,12,31 ; Insert VSID[4:23]
5580 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5581
5582 mtsrin r8,r5 ; Load the segment
5583 b ssg32Enter ; Go enter the next...
5584
5585 .align 5
5586
5587 ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5588 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5589
5590 lis r0,0x8000 ; Get set for a mask
5591 li r2,invalSpace ; Set the invalid address space VSID
5592
5593 nop ; Align loop
5594 nop ; Align loop
5595 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5596 nop ; Align loop
5597
5598 ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5599 cmplwi r18,16 ; Have we finished?
5600 srw r22,r0,r18 ; Get the mask bit
5601 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5602 andc r16,r16,r22 ; Get rid of the guy we just did
5603 bge ssg32Really ; Yes, we are really done now...
5604
5605 mtsrin r2,r23 ; Invalidate the SR
5606 b ssg32Inval ; Do the next...
5607
5608 .align 5
5609
5610 ssg32Really:
5611 stw r17,validSegs(r29) ; Set the valid SR flags
5612 li r3,1 ; Set kernel/user transition
5613 blr
5614
5615 ;
5616 ; This is the 64-bit address space switch code.
5617 ; First we blow away all of the SLB entries.
5618 ; Then we walk through the segment cache, loading the SLB.
5619 ; Afterwards, we release the cache lock.
5620 ;
5621 ; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5622 ; It's a performance thing...
5623 ;
5624
5625 .align 5
5626
5627 ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5628 cmplwi r12,pmapSegCacheUse ; See if we are done
5629 slwi r14,r12,4 ; Index to the cache slot
5630 srw r16,r0,r12 ; Form a mask for the one we are loading
5631 add r14,r14,r9 ; Point to the entry
5632 andc r11,r11,r16 ; Clear the bit
5633 bge-- ssg64Done ; All done...
5634
5635 ld r5,sgcESID(r14) ; Get the ESID part
5636 ld r6,sgcVSID(r14) ; And get the VSID part
5637 oris r5,r5,0x0800 ; Turn on the valid bit
5638 or r5,r5,r17 ; Insert the SLB slot
5639 xor r6,r6,r30 ; Modify the key before we actually set it
5640 addi r17,r17,1 ; Bump to the next slot
5641 slbmte r6,r5 ; Make that SLB entry
5642 b ssg64Enter ; Go enter the next...
5643
5644 .align 5
5645
5646 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5647
5648 eqv r16,r16,r16 ; Load up with all foxes
5649 subfic r17,r17,64 ; Get the number of 1 bits we need
5650
5651 sld r16,r16,r17 ; Get a mask for the used SLB entries
5652 li r3,1 ; Set kernel/user transition
5653 std r16,validSegs(r29) ; Set the valid SR flags
5654 blr
5655
5656 ;
5657 ; mapSetUp - this function sets initial state for all mapping functions.
5658 ; We turn off all translations (physical), disable interruptions, and
5659 ; enter 64-bit mode if applicable.
5660 ;
5661 ; We also return the original MSR in r11, the feature flags in R12,
5662 ; and CR6 set up so we can do easy branches for 64-bit
5663 ; hw_clear_maps assumes r10, r9 will not be trashed.
5664 ;
5665
5666 .align 5
5667 .globl EXT(mapSetUp)
5668
5669 LEXT(mapSetUp)
5670
5671 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5672 mfsprg r12,2 ; Get feature flags
5673 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5674 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5675 mfmsr r11 ; Save the MSR
5676 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5677 andc r11,r11,r0 ; Clear VEC and FP for good
5678 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5679 li r2,1 ; Prepare for 64 bit
5680 andc r0,r11,r0 ; Clear the rest
5681 bt pfNoMSRirb,msuNoMSR ; No MSR...
5682 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5683
5684 mtmsr r0 ; Translation and all off
5685 isync ; Toss prefetch
5686 blr ; Return...
5687
5688 .align 5
5689
5690 msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5691 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5692 isync ; synchronize
5693 blr ; Return...
5694
5695 .align 5
5696
5697 msuNoMSR: mr r2,r3 ; Save R3 across call
5698 mr r3,r0 ; Get the new MSR value
5699 li r0,loadMSR ; Get the MSR setter SC
5700 sc ; Set it
5701 mr r3,r2 ; Restore R3
5702 blr ; Go back all set up...
5703
5704
5705 ;
5706 ; Guest shadow assist -- remove all guest mappings
5707 ;
5708 ; Remove all mappings for a guest pmap from the shadow hash table.
5709 ;
5710 ; Parameters:
5711 ; r3 : address of pmap, 32-bit kernel virtual address
5712 ;
5713 ; Non-volatile register usage:
5714 ; r24 : host pmap's physical address
5715 ; r25 : VMM extension block's physical address
5716 ; r26 : physent address
5717 ; r27 : guest pmap's space ID number
5718 ; r28 : current hash table page index
5719 ; r29 : guest pmap's physical address
5720 ; r30 : saved msr image
5721 ; r31 : current mapping
5722 ;
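;
;			Illustrative C outline of the walk performed below (not part of the build;
;			GV_HPAGES, GV_SLOT_SZ and GV_PAGE_MASK are the real constants, the helpers are
;			shorthand for the PTE disconnect, physent unchain and flag updates done inline):
;
;				for (uint32_t page = 0; page < GV_HPAGES; page++) {
;					mapping_t *mp = hash_page_base(page);		/* first slot in this hash table page */
;					do {
;						if (!is_free(mp) && space_of(mp) == guest_space) {
;							if (!is_dormant(mp))
;								disconnect_pte(mp);	/* invalidate PTE, gather ref/change */
;							unchain_from_physent(mp);	/* drop the gpv->phys link */
;							mark_free(mp);
;						}
;						mp = (mapping_t *)((char *)mp + GV_SLOT_SZ);
;					} while ((uintptr_t)mp & GV_PAGE_MASK);		/* until we wrap into the next page */
;				}
;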
5723 .align 5
5724 .globl EXT(hw_rem_all_gv)
5725
5726 LEXT(hw_rem_all_gv)
5727
5728 #define graStackSize ((31-24+1)*4)+4
5729 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5730 ; Mint a new stack frame
5731 mflr r0 ; Get caller's return address
5732 mfsprg r11,2 ; Get feature flags
5733 mtcrf 0x02,r11 ; Insert feature flags into cr6
5734 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5735 ; Save caller's return address
5736 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5737 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5738 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5739 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5740 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5741 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5742 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5743 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5744
5745 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5746
5747 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5748 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5749 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5750 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5751 b graStart ; Get to it
5752 gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5753 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5754 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5755 graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5756 xor r29,r3,r9 ; Convert pmap_t virt->real
5757 mr r30,r11 ; Save caller's msr image
5758
5759 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5760 bl sxlkExclusive ; Get lock exclusive
5761
5762 lwz r3,vxsGra(r25) ; Get remove all count
5763 addi r3,r3,1 ; Increment remove all count
5764 stw r3,vxsGra(r25) ; Update remove all count
5765
5766 li r28,0 ; r28 <- first hash page table index to search
5767 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5768 graPgLoop:
5769 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5770 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5771 ; Convert page index into page physical index offset
5772 add r31,r31,r11 ; Calculate page physical index entry address
5773 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5774 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5775 b graLoop ; Examine all slots in this page
5776 gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5777 b graLoop ; Examine all slots in this page
5778
5779 .align 5
5780 graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5781 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5782 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5783 xor r4,r4,r27 ; Compare space ID number
5784 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5785 bne graMiss ; Not one of ours, skip it
5786
5787 lwz r11,vxsGraHits(r25) ; Get remove hit count
5788 addi r11,r11,1 ; Increment remove hit count
5789 stw r11,vxsGraHits(r25) ; Update remove hit count
5790
5791 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5792 bne graRemPhys ; Yes, nothing to disconnect
5793
5794 lwz r11,vxsGraActive(r25) ; Get remove active count
5795 addi r11,r11,1 ; Increment remove active count
5796 stw r11,vxsGraActive(r25) ; Update remove active count
5797
5798 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5799 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5800 ; r31 <- mapping's physical address
5801 ; r3 -> PTE slot physical address
5802 ; r4 -> High-order 32 bits of PTE
5803 ; r5 -> Low-order 32 bits of PTE
5804 ; r6 -> PCA
5805 ; r7 -> PCA physical address
5806 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5807 b graFreePTE ; Join 64-bit path to release the PTE
5808 graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5809 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5810 graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5811 beq- graRemPhys ; No valid PTE, we're almost done
5812 lis r0,0x8000 ; Prepare free bit for this slot
5813 srw r0,r0,r2 ; Position free bit
5814 or r6,r6,r0 ; Set it in our PCA image
5815 lwz r8,mpPte(r31) ; Get PTE pointer
5816 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5817 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5818 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5819 stw r6,0(r7) ; Update PCA and unlock the PTEG
5820
5821 graRemPhys:
5822 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5823 bl mapFindLockPN ; Find 'n' lock this page's physent
5824 mr. r26,r3 ; Got lock on our physent?
5825 beq-- graBadPLock ; No, time to bail out
5826
5827 crset cr1_eq ; cr1_eq <- previous link is the anchor
5828 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5829 la r11,ppLink+4(r26) ; Point to chain anchor
5830 lwz r9,ppLink+4(r26) ; Get chain anchor
5831 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5832
5833 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5834 cmplw r9,r31 ; Is this the mapping to remove?
5835 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5836 bne graRemNext ; No, chain onward
5837 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5838 stw r8,0(r11) ; Unchain gpv->phys mapping
5839 b graRemoved ; Exit loop
5840 graRemRetry:
5841 lwarx r0,0,r11 ; Get previous link
5842 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5843 stwcx. r0,0,r11 ; Update previous link
5844 bne- graRemRetry ; Lost reservation, retry
5845 b graRemoved ; Good work, let's get outta here
5846
5847 graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5848 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5849 mr. r9,r8 ; Does next entry exist?
5850 b graRemLoop ; Carry on
5851
5852 graRemove64:
5853 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5854 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5855 la r11,ppLink(r26) ; Point to chain anchor
5856 ld r9,ppLink(r26) ; Get chain anchor
5857 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5858 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5859 cmpld r9,r31 ; Is this the mapping to remove?
5860 ld r8,mpAlias(r9) ; Get forward chain pointer
5861 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5862 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5863 std r8,0(r11) ; Unchain gpv->phys mapping
5864 b graRemoved ; Exit loop
5865 graRem64Rt: ldarx r0,0,r11 ; Get previous link
5866 and r0,r0,r7 ; Get flags
5867 or r0,r0,r8 ; Insert new forward pointer
5868 stdcx. r0,0,r11 ; Slam it back in
5869 bne-- graRem64Rt ; Lost reservation, retry
5870 b graRemoved ; Good work, let's go home
5871
5872 graRem64Nxt:
5873 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5874 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5875 mr. r9,r8 ; Does next entry exist?
5876 b graRem64Lp ; Carry on
5877
5878 graRemoved:
5879 mr r3,r26 ; r3 <- physent's address
5880 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5881
5882 lwz r3,mpFlags(r31) ; Get mapping's flags
5883 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5884 ori r3,r3,mpgFree ; Mark mapping free
5885 stw r3,mpFlags(r31) ; Update flags
5886
5887 graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5888 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5889 bne graLoop ; No, examine next slot
5890 addi r28,r28,1 ; Increment hash table page index
5891 cmplwi r28,GV_HPAGES ; End of hash table?
5892 bne graPgLoop ; Examine next hash table page
5893
5894 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5895 bl sxlkUnlock ; Release host pmap's search lock
5896
5897 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5898 mtmsr r30 ; Restore 'rupts, translation
5899 isync ; Throw a small wrench into the pipeline
5900 b graPopFrame ; Nothing to do now but pop a frame and return
5901 graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5902 graPopFrame:
5903 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5904 ; Get caller's return address
5905 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5906 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5907 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5908 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5909 mtlr r0 ; Prepare return address
5910 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5911 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5912 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5913 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5914 lwz r1,0(r1) ; Pop stack frame
5915 blr ; Return to caller
5916
5917 graBadPLock:
5918 graRemoveMiss:
5919 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5920 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5921 li r3,failMapping ; The BOMB, Dmitri.
5922 sc ; The hydrogen bomb.
5923
5924
5925 ;
5926 ; Guest shadow assist -- remove local guest mappings
5927 ;
5928 ; Remove local mappings for a guest pmap from the shadow hash table.
5929 ;
5930 ; Parameters:
5931 ; r3 : address of guest pmap, 32-bit kernel virtual address
5932 ;
5933 ; Non-volatile register usage:
5934 ; r20 : current active map word's physical address
5935 ; r21 : current hash table page address
5936 ; r22 : updated active map word in process
5937 ; r23 : active map word in process
5938 ; r24 : host pmap's physical address
5939 ; r25 : VMM extension block's physical address
5940 ; r26 : physent address
5941 ; r27 : guest pmap's space ID number
5942 ; r28 : current active map index
5943 ; r29 : guest pmap's physical address
5944 ; r30 : saved msr image
5945 ; r31 : current mapping
5946 ;
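;
; As a rough guide to the scan below, a C sketch of the algorithm might look like
; this (types, constants and the gv_slot() helper are hypothetical stand-ins for the
; VMM extension block layout; PTE invalidation and PTEG locking are omitted):
;
;    #include <stdint.h>
;
;    #define GV_MAP_WORDS 64u                 /* illustrative word count      */
;    #define MPG_GLOBAL   0x0001u             /* illustrative flag bits       */
;    #define MPG_DORMANT  0x0002u
;
;    typedef struct { uint32_t flags; uint16_t space; } gv_mapping_t;
;    extern gv_mapping_t *gv_slot(uint32_t word, uint32_t bit);  /* hypothetical */
;
;    void gv_rem_local(uint32_t *active_map, uint16_t guest_space) {
;        for (uint32_t w = 0; w < GV_MAP_WORDS; w++) {
;            uint32_t live = active_map[w];
;            while (live) {
;                uint32_t bit = (uint32_t)__builtin_clz(live);  /* next lit bit  */
;                uint32_t msk = 0x80000000u >> bit;
;                live &= ~msk;                                  /* consume it    */
;                gv_mapping_t *mp = gv_slot(w, bit);
;                if ((mp->flags & MPG_GLOBAL) || mp->space != guest_space)
;                    continue;                                  /* not ours      */
;                mp->flags |= MPG_DORMANT;                      /* deactivate    */
;                active_map[w] &= ~msk;                         /* clear map bit */
;            }
;        }
;    }
;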
5947 .align 5
5948 .globl EXT(hw_rem_local_gv)
5949
5950 LEXT(hw_rem_local_gv)
5951
5952 #define grlStackSize ((31-20+1)*4)+4
5953 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5954 ; Mint a new stack frame
5955 mflr r0 ; Get caller's return address
5956 mfsprg r11,2 ; Get feature flags
5957 mtcrf 0x02,r11 ; Insert feature flags into cr6
5958 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5959 ; Save caller's return address
5960 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5961 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5962 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5963 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5964 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5965 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5966 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5967 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5968 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5969 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5970 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5971 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5972
5973 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5974
5975 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5976 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5977 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5978 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5979 b grlStart ; Get to it
5980 grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5981 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5982 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5983
5984 grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5985 xor r29,r3,r9 ; Convert pmap_t virt->real
5986 mr r30,r11 ; Save caller's msr image
5987
5988 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5989 bl sxlkExclusive ; Get lock exclusive
5990
5991 li r28,0 ; r28 <- index of first active map word to search
5992 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5993 b grlMap1st ; Examine first map word
5994
5995 .align 5
5996 grlNextMap: stw r22,0(r21) ; Save updated map word
5997 addi r28,r28,1 ; Increment map word index
5998 cmplwi r28,GV_MAP_WORDS ; See if we're done
5999 beq grlDone ; Yup, let's get outta here
6000
6001 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
6002 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
6003 ; Convert map index into map index offset
6004 add r20,r20,r11 ; Calculate map array element address
6005 lwz r22,0(r20) ; Get active map word at index
6006 mr. r23,r22 ; Any active mappings indicated?
6007 beq grlNextMap ; Nope, check next word
6008
6009 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6010 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6011 ; Extract page index from map word index and convert
6012 ; into page physical index offset
6013 add r21,r21,r11 ; Calculate page physical index entry address
6014 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6015 lwz r21,4(r21) ; Get selected hash table page's address
6016 b grlLoop ; Examine all slots in this page
6017 grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6018 b grlLoop ; Examine all slots in this page
6019
6020 .align 5
6021 grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6022 cmplwi r11,32 ; Any active mappings left in this word?
6023 lis r12,0x8000 ; Prepare mask to reset bit
6024 srw r12,r12,r11 ; Position mask bit
6025 andc r23,r23,r12 ; Reset lit bit
6026 beq grlNextMap ; No bits lit, examine next map word
6027
6028 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6029 rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6030 ; Extract slot band number from index and insert
6031 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6032
6033 lwz r3,mpFlags(r31) ; Get mapping's flags
6034 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6035 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6036 xor r4,r4,r27 ; Compare space ID number
6037 or. r4,r4,r5 ; (space id miss || global)
6038 bne grlLoop ; Not one of ours, skip it
6039 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6040 ori r3,r3,mpgDormant ; Mark entry dormant
6041 stw r3,mpFlags(r31) ; Update mapping's flags
6042
6043 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6044 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6045 ; r31 <- mapping's physical address
6046 ; r3 -> PTE slot physical address
6047 ; r4 -> High-order 32 bits of PTE
6048 ; r5 -> Low-order 32 bits of PTE
6049 ; r6 -> PCA
6050 ; r7 -> PCA physical address
6051 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6052 b grlFreePTE ; Join 64-bit path to release the PTE
6053 grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6054 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6055 grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6056 beq- grlLoop ; No valid PTE, we're done with this mapping
6057 lis r0,0x8000 ; Prepare free bit for this slot
6058 srw r0,r0,r2 ; Position free bit
6059 or r6,r6,r0 ; Set it in our PCA image
6060 lwz r8,mpPte(r31) ; Get PTE pointer
6061 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6062 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6063 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6064 stw r6,0(r7) ; Update PCA and unlock the PTEG
6065 b grlLoop ; On to next active mapping in this map word
6066
6067 grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6068 bl sxlkUnlock ; Release host pmap's search lock
6069
6070 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6071 mtmsr r30 ; Restore 'rupts, translation
6072 isync ; Throw a small wrench into the pipeline
6073 b grlPopFrame ; Nothing to do now but pop a frame and return
6074 grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6075 grlPopFrame:
6076 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6077 ; Get caller's return address
6078 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6079 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6080 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6081 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6082 mtlr r0 ; Prepare return address
6083 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6084 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6085 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6086 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6087 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6088 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6089 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6090 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6091 lwz r1,0(r1) ; Pop stack frame
6092 blr ; Return to caller
6093
6094
6095 ;
6096 ; Guest shadow assist -- resume a guest mapping
6097 ;
6098 ; Locates the specified dormant mapping, and if it exists validates it and makes it
6099 ; active.
6100 ;
6101 ; Parameters:
6102 ; r3 : address of host pmap, 32-bit kernel virtual address
6103 ; r4 : address of guest pmap, 32-bit kernel virtual address
6104 ; r5 : host virtual address, high-order 32 bits
6105 ; r6 : host virtual address, low-order 32 bits
6106 ; r7 : guest virtual address, high-order 32 bits
6107 ; r8 : guest virtual address, low-order 32 bits
6108 ; r9 : guest mapping protection code
6109 ;
6110 ; Non-volatile register usage:
6111 ; r23 : VMM extension block's physical address
6112 ; r24 : physent physical address
6113 ; r25 : caller's msr image from mapSetUp
6114 ; r26 : guest mapping protection code
6115 ; r27 : host pmap physical address
6116 ; r28 : guest pmap physical address
6117 ; r29 : host virtual address
6118 ; r30 : guest virtual address
6119 ; r31 : gva->phys mapping's physical address
6120 ;
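;
; All of the guest shadow assist routines locate a mapping the same way: hash the
; guest space ID with the page-aligned guest virtual address, pick a hash page and
; group from that hash, then linearly probe the group's slots. A C sketch of that
; lookup, under hypothetical types and a hypothetical gv_group() helper, might be:
;
;    #include <stdint.h>
;
;    #define GV_SLOTS 8u                      /* illustrative slots per group */
;    #define MPG_FREE 0x0004u                 /* illustrative flag bit        */
;
;    typedef struct { uint32_t flags; uint16_t space; uint64_t vaddr; } gv_slot_t;
;    extern gv_slot_t *gv_group(uint32_t hash);   /* hash page + group select */
;
;    gv_slot_t *gv_lookup(uint16_t space, uint64_t gva) {
;        uint32_t hash = (uint32_t)(gva >> 12) ^ space;    /* spaceID ^ (vaddr >> 12) */
;        gv_slot_t *slot = gv_group(hash);
;        for (uint32_t i = 0; i < GV_SLOTS; i++, slot++) {
;            if (slot->flags & MPG_FREE) continue;         /* empty slot       */
;            if (slot->space != space) continue;           /* wrong space ID   */
;            if ((slot->vaddr ^ gva) & ~0xFFFull) continue;/* wrong page       */
;            return slot;                                  /* hit              */
;        }
;        return 0;                                         /* miss             */
;    }
;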
6121 .align 5
6122 .globl EXT(hw_res_map_gv)
6123
6124 LEXT(hw_res_map_gv)
6125
6126 #define grsStackSize ((31-23+1)*4)+4
6127
6128 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6129 ; Mint a new stack frame
6130 mflr r0 ; Get caller's return address
6131 mfsprg r11,2 ; Get feature flags
6132 mtcrf 0x02,r11 ; Insert feature flags into cr6
6133 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6134 ; Save caller's return address
6135 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6136 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6137 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6138 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6139 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6140 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6141 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6142 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6143 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6144
6145 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6146 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6147 mr r26,r9 ; Copy guest mapping protection code
6148
6149 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6150 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6151 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6152 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6153 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6154 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6155 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6156 srwi r11,r30,12 ; Form shadow hash:
6157 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6158 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6159 ; Form index offset from hash page number
6160 add r31,r31,r10 ; r31 <- hash page index entry
6161 lwz r31,4(r31) ; r31 <- hash page paddr
6162 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6163 ; r31 <- hash group paddr
6164 b grsStart ; Get to it
6165
6166 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6167 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6168 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6169 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6170 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6171 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6172 srwi r11,r30,12 ; Form shadow hash:
6173 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6174 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6175 ; Form index offset from hash page number
6176 add r31,r31,r10 ; r31 <- hash page index entry
6177 ld r31,0(r31) ; r31 <- hash page paddr
6178 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6179 ; r31 <- hash group paddr
6180
6181 grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6182 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6183 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6184 mr r25,r11 ; Save caller's msr image
6185
6186 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6187 bl sxlkExclusive ; Get lock exclusive
6188
6189 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6190 mtctr r0 ; in this group
6191 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6192
6193 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6194 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6195 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6196 b grs32SrchLp ; Let the search begin!
6197
6198 .align 5
6199 grs32SrchLp:
6200 mr r6,r3 ; r6 <- current mapping slot's flags
6201 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6202 mr r7,r4 ; r7 <- current mapping slot's space ID
6203 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6204 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6205 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6206 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6207 xor r7,r7,r9 ; Compare space ID
6208 or r0,r11,r7 ; r0 <- !(!free && space match)
6209 xor r8,r8,r30 ; Compare virtual address
6210 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6211 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6212
6213 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6214 bdnz grs32SrchLp ; Iterate
6215
6216 mr r6,r3 ; r6 <- current mapping slot's flags
6217 clrrwi r5,r5,12 ; Remove flags from virtual address
6218 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6219 xor r4,r4,r9 ; Compare space ID
6220 or r0,r11,r4 ; r0 <- !(!free && space match)
6221 xor r5,r5,r30 ; Compare virtual address
6222 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6223 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6224 b grsSrchMiss ; No joy in our hash group
6225
6226 grs64Search:
6227 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6228 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6229 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6230 b grs64SrchLp ; Let the search begin!
6231
6232 .align 5
6233 grs64SrchLp:
6234 mr r6,r3 ; r6 <- current mapping slot's flags
6235 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6236 mr r7,r4 ; r7 <- current mapping slot's space ID
6237 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6238 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6239 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6240 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6241 xor r7,r7,r9 ; Compare space ID
6242 or r0,r11,r7 ; r0 <- !(!free && space match)
6243 xor r8,r8,r30 ; Compare virtual address
6244 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6245 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6246
6247 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6248 bdnz grs64SrchLp ; Iterate
6249
6250 mr r6,r3 ; r6 <- current mapping slot's flags
6251 clrrdi r5,r5,12 ; Remove flags from virtual address
6252 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6253 xor r4,r4,r9 ; Compare space ID
6254 or r0,r11,r4 ; r0 <- !(!free && space match)
6255 xor r5,r5,r30 ; Compare virtual address
6256 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6257 bne grsSrchMiss ; No joy in our hash group
6258
6259 grsSrchHit:
6260 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6261 bne grsFindHost ; Yes, nothing to disconnect
6262
6263 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6264 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6265 ; r31 <- mapping's physical address
6266 ; r3 -> PTE slot physical address
6267 ; r4 -> High-order 32 bits of PTE
6268 ; r5 -> Low-order 32 bits of PTE
6269 ; r6 -> PCA
6270 ; r7 -> PCA physical address
6271 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6272 b grsFreePTE ; Join 64-bit path to release the PTE
6273 grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6274 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6275 grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6276 beq- grsFindHost ; No valid PTE, we're almost done
6277 lis r0,0x8000 ; Prepare free bit for this slot
6278 srw r0,r0,r2 ; Position free bit
6279 or r6,r6,r0 ; Set it in our PCA image
6280 lwz r8,mpPte(r31) ; Get PTE pointer
6281 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6282 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6283 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6284 stw r6,0(r7) ; Update PCA and unlock the PTEG
6285
6286 grsFindHost:
6287
6288 // We now have a dormant guest mapping that matches our space id and virtual address. Our next
6289 // step is to locate the host mapping that completes the guest mapping's connection to a physical
6290 // frame. The guest and host mappings must connect to the same physical frame, so they must both
6291 // be chained on the same physent. We search the physent chain for a host mapping matching our
6292 // host's space id and the host virtual address. If we succeed, we know that the entire chain
6293 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6294 // resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6295 // host virtual or physical address has changed since the guest mapping was suspended, so it
6296 // is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
6297 // our caller that it will have to take its long path, translating the host virtual address
6298 // through the host's skiplist and installing a new guest mapping.
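//
// A C sketch of that physent-chain check, with hypothetical types standing in for
// the real mapping structure (only the decision is shown; locking is omitted):
//
//    #include <stdint.h>
//
//    #define MP_NORMAL 1u                     /* illustrative mapping type    */
//
//    typedef struct gv_map {
//        struct gv_map *alias;                /* next mapping on this physent */
//        uint32_t       type;
//        uint16_t       space;
//        uint64_t       vaddr;
//    } gv_map_t;
//
//    /* Nonzero if some normal host mapping on this physent still maps
//       (host_space, host_va), i.e. the dormant guest mapping may be resumed. */
//    int gv_resume_ok(gv_map_t *chain, uint16_t host_space, uint64_t host_va) {
//        for (gv_map_t *mp = chain; mp != 0; mp = mp->alias) {
//            if (mp->type != MP_NORMAL) continue;          /* skip other types */
//            if (mp->space != host_space) continue;
//            if ((mp->vaddr ^ host_va) & ~0xFFFull) continue;
//            return 1;                                     /* hva->phys intact */
//        }
//        return 0;   /* host side changed: delete the guest mapping instead    */
//    }
//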
6299
6300 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6301 bl mapFindLockPN ; Find 'n' lock this page's physent
6302 mr. r24,r3 ; Got lock on our physent?
6303 beq-- grsBadPLock ; No, time to bail out
6304
6305 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6306
6307 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6308 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6309 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6310 grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6311 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6312 lwz r7,mpFlags(r12) ; Get mapping's flags
6313 lhz r4,mpSpace(r12) ; Get mapping's space id number
6314 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6315 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6316
6317 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6318 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6319 xori r0,r0,mpNormal ; Normal mapping?
6320 xor r4,r4,r6 ; Compare w/ host space id number
6321 xor r5,r5,r29 ; Compare w/ host virtual address
6322 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6323 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6324 beq grsPEHit ; Hit
6325 b grsPELoop ; Iterate
6326
6327 grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6328 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6329 ld r9,ppLink(r24) ; Get first mapping on physent
6330 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6331 andc r9,r9,r0 ; Cleanup mapping pointer
6332 grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6333 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6334 lwz r7,mpFlags(r12) ; Get mapping's flags
6335 lhz r4,mpSpace(r12) ; Get mapping's space id number
6336 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6337 ld r9,mpAlias(r12) ; Next mapping physent alias chain
6338 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6339 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6340 xori r0,r0,mpNormal ; Normal mapping?
6341 xor r4,r4,r6 ; Compare w/ host space id number
6342 xor r5,r5,r29 ; Compare w/ host virtual address
6343 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6344 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6345 beq grsPEHit ; Hit
6346 b grsPELp64 ; Iterate
6347
6348 grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6349 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6350 stw r0,mpVAddr+4(r31) ; Write 'em back
6351
6352 eieio ; Ensure previous mapping updates are visible
6353 lwz r0,mpFlags(r31) ; Get flags
6354 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6355 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6356
6357 li r31,mapRtOK ; Indicate success
6358 b grsRelPhy ; Exit through physent lock release
6359
6360 grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6361 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6362 la r11,ppLink+4(r24) ; Point to chain anchor
6363 lwz r9,ppLink+4(r24) ; Get chain anchor
6364 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6365 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6366 cmplw r9,r31 ; Is this the mapping to remove?
6367 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6368 bne grsRemNext ; No, chain onward
6369 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6370 stw r8,0(r11) ; Unchain gpv->phys mapping
6371 b grsDelete ; Finish deleting mapping
6372 grsRemRetry:
6373 lwarx r0,0,r11 ; Get previous link
6374 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6375 stwcx. r0,0,r11 ; Update previous link
6376 bne- grsRemRetry ; Lost reservation, retry
6377 b grsDelete ; Finish deleting mapping
6378
6379 .align 5
6380 grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6381 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6382 mr. r9,r8 ; Does next entry exist?
6383 b grsRemLoop ; Carry on
6384
6385 grsRemove64:
6386 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6387 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6388 la r11,ppLink(r24) ; Point to chain anchor
6389 ld r9,ppLink(r24) ; Get chain anchor
6390 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6391 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6392 cmpld r9,r31 ; Is this the mapping to remove?
6393 ld r8,mpAlias(r9) ; Get forward chain pointer
6394 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6395 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6396 std r8,0(r11) ; Unchain gpv->phys mapping
6397 b grsDelete ; Finish deleting mapping
6398 grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6399 and r0,r0,r7 ; Get flags
6400 or r0,r0,r8 ; Insert new forward pointer
6401 stdcx. r0,0,r11 ; Slam it back in
6402 bne-- grsRem64Rt ; Lost reservation, retry
6403 b grsDelete ; Finish deleting mapping
6404
6405 .align 5
6406 grsRem64Nxt:
6407 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6408 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6409 mr. r9,r8 ; Does next entry exist?
6410 b grsRem64Lp ; Carry on
6411
6412 grsDelete:
6413 lwz r3,mpFlags(r31) ; Get mapping's flags
6414 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6415 ori r3,r3,mpgFree ; Mark mapping free
6416 stw r3,mpFlags(r31) ; Update flags
6417
6418 li r31,mapRtNotFnd ; Didn't succeed
6419
6420 grsRelPhy: mr r3,r24 ; r3 <- physent addr
6421 bl mapPhysUnlock ; Unlock physent chain
6422
6423 grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6424 bl sxlkUnlock ; Release host pmap search lock
6425
6426 grsRtn: mr r3,r31 ; r3 <- result code
6427 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6428 mtmsr r25 ; Restore 'rupts, translation
6429 isync ; Throw a small wrench into the pipeline
6430 b grsPopFrame ; Nothing to do now but pop a frame and return
6431 grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6432 grsPopFrame:
6433 lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6434 ; Get caller's return address
6435 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6436 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6437 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6438 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6439 mtlr r0 ; Prepare return address
6440 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6441 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6442 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6443 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6444 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6445 lwz r1,0(r1) ; Pop stack frame
6446 blr ; Return to caller
6447
6448 .align 5
6449 grsSrchMiss:
6450 li r31,mapRtNotFnd ; Could not locate requested mapping
6451 b grsRelPmap ; Exit through host pmap search lock release
6452
6453 grsBadPLock:
6454 grsPEMissMiss:
6455 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6456 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6457 li r3,failMapping ; The BOMB, Dmitri.
6458 sc ; The hydrogen bomb.
6459
6460
6461 ;
6462 ; Guest shadow assist -- add a guest mapping
6463 ;
6464 ; Adds a guest mapping.
6465 ;
6466 ; Parameters:
6467 ; r3 : address of host pmap, 32-bit kernel virtual address
6468 ; r4 : address of guest pmap, 32-bit kernel virtual address
6469 ; r5 : guest virtual address, high-order 32 bits
6470 ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6471 ; r7 : new mapping's flags
6472 ; r8 : physical address, 32-bit page number
6473 ;
6474 ; Non-volatile register usage:
6475 ; r22 : hash group's physical address
6476 ; r23 : VMM extension block's physical address
6477 ; r24 : mapping's flags
6478 ; r25 : caller's msr image from mapSetUp
6479 ; r26 : physent physical address
6480 ; r27 : host pmap physical address
6481 ; r28 : guest pmap physical address
6482 ; r29 : physical address, 32-bit 4k-page number
6483 ; r30 : guest virtual address
6484 ; r31 : gva->phys mapping's physical address
6485 ;
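;
; The interesting part of this routine is how it picks a slot in the target hash
; group when the mapping is not already present: the first free slot wins, else the
; first dormant slot found starting at the group's cursor, else the slot under the
; cursor is stolen outright, and the cursor then advances past the chosen victim.
; A C sketch of that policy (hypothetical types; in the real code the chosen slot's
; PTE is disconnected before reuse):
;
;    #include <stdint.h>
;
;    #define GV_SLOTS    8u                   /* illustrative slots per group */
;    #define MPG_FREE    0x0004u
;    #define MPG_DORMANT 0x0002u
;
;    typedef struct { uint32_t flags; } gv_slot_t;
;    typedef struct { gv_slot_t slot[GV_SLOTS]; uint8_t cursor; } gv_group_t;
;
;    gv_slot_t *gv_pick_slot(gv_group_t *g) {
;        gv_slot_t *dormant = 0;
;        uint32_t   victim  = g->cursor;              /* default: steal here   */
;        for (uint32_t i = 0; i < GV_SLOTS; i++) {
;            uint32_t idx = (g->cursor + i) % GV_SLOTS;
;            gv_slot_t *s = &g->slot[idx];
;            if (s->flags & MPG_FREE) return s;       /* free slot wins        */
;            if (!dormant && (s->flags & MPG_DORMANT)) { dormant = s; victim = idx; }
;        }
;        g->cursor = (uint8_t)((victim + 1) % GV_SLOTS);  /* advance round-robin */
;        return dormant ? dormant : &g->slot[victim];
;    }
;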
6486
6487 .align 5
6488 .globl EXT(hw_add_map_gv)
6489
6490
6491 LEXT(hw_add_map_gv)
6492
6493 #define gadStackSize ((31-22+1)*4)+4
6494
6495 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6496 ; Mint a new stack frame
6497 mflr r0 ; Get caller's return address
6498 mfsprg r11,2 ; Get feature flags
6499 mtcrf 0x02,r11 ; Insert feature flags into cr6
6500 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6501 ; Save caller's return address
6502 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6503 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6504 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6505 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6506 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6507 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6508 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6509 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6510 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6511 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6512
6513 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6514 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6515 mr r24,r7 ; Copy guest mapping's flags
6516 mr r29,r8 ; Copy target frame's physical address
6517
6518 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6519 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6520 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6521 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6522 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6523 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6524 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6525 srwi r11,r30,12 ; Form shadow hash:
6526 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6527 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6528 ; Form index offset from hash page number
6529 add r22,r22,r10 ; r22 <- hash page index entry
6530 lwz r22,4(r22) ; r22 <- hash page paddr
6531 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6532 ; r22 <- hash group paddr
6533 b gadStart ; Get to it
6534
6535 gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6536 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6537 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6538 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6539 srwi r11,r30,12 ; Form shadow hash:
6540 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6541 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6542 ; Form index offset from hash page number
6543 add r22,r22,r10 ; r22 <- hash page index entry
6544 ld r22,0(r22) ; r22 <- hash page paddr
6545 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6546 ; r22 <- hash group paddr
6547
6548 gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6549 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6550 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6551 mr r25,r11 ; Save caller's msr image
6552
6553 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6554 bl sxlkExclusive ; Get lock exclusive
6555
6556 mr r31,r22 ; Prepare to search this group
6557 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6558 mtctr r0 ; in this group
6559 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6560
6561 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6562 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6563 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6564 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6565 b gad32SrchLp ; Let the search begin!
6566
6567 .align 5
6568 gad32SrchLp:
6569 mr r6,r3 ; r6 <- current mapping slot's flags
6570 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6571 mr r7,r4 ; r7 <- current mapping slot's space ID
6572 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6573 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6574 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6575 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6576 xor r7,r7,r9 ; Compare space ID
6577 or r0,r11,r7 ; r0 <- !(!free && space match)
6578 xor r8,r8,r12 ; Compare virtual address
6579 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6580 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6581
6582 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6583 bdnz gad32SrchLp ; Iterate
6584
6585 mr r6,r3 ; r6 <- current mapping slot's flags
6586 clrrwi r5,r5,12 ; Remove flags from virtual address
6587 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6588 xor r4,r4,r9 ; Compare space ID
6589 or r0,r11,r4 ; r0 <- !(!free && space match)
6590 xor r5,r5,r12 ; Compare virtual address
6591 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6592 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6593 b gadScan ; No joy in our hash group
6594
6595 gad64Search:
6596 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6597 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6598 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6599 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6600 b gad64SrchLp ; Let the search begin!
6601
6602 .align 5
6603 gad64SrchLp:
6604 mr r6,r3 ; r6 <- current mapping slot's flags
6605 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6606 mr r7,r4 ; r7 <- current mapping slot's space ID
6607 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6608 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6609 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6610 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6611 xor r7,r7,r9 ; Compare space ID
6612 or r0,r11,r7 ; r0 <- !(!free && space match)
6613 xor r8,r8,r12 ; Compare virtual address
6614 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6615 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6616
6617 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6618 bdnz gad64SrchLp ; Iterate
6619
6620 mr r6,r3 ; r6 <- current mapping slot's flags
6621 clrrdi r5,r5,12 ; Remove flags from virtual address
6622 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6623 xor r4,r4,r9 ; Compare space ID
6624 or r0,r11,r4 ; r0 <- !(!free && space match)
6625 xor r5,r5,r12 ; Compare virtual address
6626 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6627 bne gadScan ; No joy in our hash group
6628 b gadRelPmap ; Hit, let upper-level redrive sort it out
6629
6630 gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6631 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6632 ; Prepare to address slot at cursor
6633 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6634 mtctr r0 ; in this group
6635 or r2,r22,r12 ; r2 <- 1st mapping to search
6636 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6637 li r11,0 ; No dormant entries found yet
6638 b gadScanLoop ; Let the search begin!
6639
6640 .align 5
6641 gadScanLoop:
6642 addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6643 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6644 ; Trim off any carry, wrapping into slot number range
6645 mr r31,r2 ; r31 <- current mapping's address
6646 or r2,r22,r12 ; r2 <- next mapping to search
6647 mr r6,r3 ; r6 <- current mapping slot's flags
6648 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6649 rlwinm. r0,r6,0,mpgFree ; Test free flag
6650 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6651 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6652 xori r0,r0,mpgDormant ; Invert dormant flag
6653 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6654 bne gadNotDorm ; Not dormant or we've already seen one
6655 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6656 gadNotDorm: bdnz gadScanLoop ; Iterate
6657
6658 mr r31,r2 ; r31 <- final mapping's address
6659 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6660 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6661 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6662 xori r0,r0,mpgDormant ; Invert dormant flag
6663 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6664 bne gadCkDormant ; Not dormant or we've already seen one
6665 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6666
6667 gadCkDormant:
6668 mr. r31,r11 ; Get dormant mapping, if any, and test
6669 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6670
6671 gadSteal:
6672 lbz r12,mpgCursor(r22) ; Get group's cursor
6673 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6674 ; Prepare to address slot at cursor
6675 or r31,r22,r12 ; r31 <- address of mapping to steal
6676
6677 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6678 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6679 ; r31 <- mapping's physical address
6680 ; r3 -> PTE slot physical address
6681 ; r4 -> High-order 32 bits of PTE
6682 ; r5 -> Low-order 32 bits of PTE
6683 ; r6 -> PCA
6684 ; r7 -> PCA physical address
6685 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6686 b gadFreePTE ; Join 64-bit path to release the PTE
6687 gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6688 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6689 gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6690 beq- gadUpCursor ; No valid PTE, we're almost done
6691 lis r0,0x8000 ; Prepare free bit for this slot
6692 srw r0,r0,r2 ; Position free bit
6693 or r6,r6,r0 ; Set it in our PCA image
6694 lwz r8,mpPte(r31) ; Get PTE pointer
6695 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6696 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6697 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6698 stw r6,0(r7) ; Update PCA and unlock the PTEG
6699
6700 gadUpCursor:
6701 rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6702 ; Recover slot number from stolen mapping's address
6703 addi r12,r12,1 ; Increment slot number
6704 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6705 stb r12,mpgCursor(r22) ; Update group's cursor
6706
6707 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6708 bl mapFindLockPN ; Find 'n' lock this page's physent
6709 mr. r26,r3 ; Got lock on our physent?
6710 beq-- gadBadPLock ; No, time to bail out
6711
6712 crset cr1_eq ; cr1_eq <- previous link is the anchor
6713 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6714 la r11,ppLink+4(r26) ; Point to chain anchor
6715 lwz r9,ppLink+4(r26) ; Get chain anchor
6716 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6717 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6718 cmplw r9,r31 ; Is this the mapping to remove?
6719 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6720 bne gadRemNext ; No, chain onward
6721 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6722 stw r8,0(r11) ; Unchain gpv->phys mapping
6723 b gadDelDone ; Finish deleting mapping
6724 gadRemRetry:
6725 lwarx r0,0,r11 ; Get previous link
6726 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6727 stwcx. r0,0,r11 ; Update previous link
6728 bne- gadRemRetry ; Lost reservation, retry
6729 b gadDelDone ; Finish deleting mapping
6730
6731 gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6732 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6733 mr. r9,r8 ; Does next entry exist?
6734 b gadRemLoop ; Carry on
6735
6736 gadRemove64:
6737 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6738 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6739 la r11,ppLink(r26) ; Point to chain anchor
6740 ld r9,ppLink(r26) ; Get chain anchor
6741 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6742 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6743 cmpld r9,r31 ; Is this the mapping to remove?
6744 ld r8,mpAlias(r9) ; Get forward chain pointer
6745 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6746 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6747 std r8,0(r11) ; Unchain gpv->phys mapping
6748 b gadDelDone ; Finish deleting mapping
6749 gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6750 and r0,r0,r7 ; Get flags
6751 or r0,r0,r8 ; Insert new forward pointer
6752 stdcx. r0,0,r11 ; Slam it back in
6753 bne-- gadRem64Rt ; Lost reservation, retry
6754 b gadDelDone ; Finish deleting mapping
6755
6756 .align 5
6757 gadRem64Nxt:
6758 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6759 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6760 mr. r9,r8 ; Does next entry exist?
6761 b gadRem64Lp ; Carry on
6762
6763 gadDelDone:
6764 mr r3,r26 ; Get physent address
6765 bl mapPhysUnlock ; Unlock physent chain
6766
6767 gadFillMap:
6768 lwz r12,pmapSpace(r28) ; Get guest space id number
6769 li r2,0 ; Get a zero
6770 stw r24,mpFlags(r31) ; Set mapping's flags
6771 sth r12,mpSpace(r31) ; Set mapping's space id number
6772 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6773 stw r29,mpPAddr(r31) ; Set mapping's physical address
6774 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6775 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6776 b gadChain ; Continue with chaining mapping to physent
6777 gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6778
6779 gadChain: mr r3,r29 ; r3 <- physical frame address
6780 bl mapFindLockPN ; Find 'n' lock this page's physent
6781 mr. r26,r3 ; Got lock on our physent?
6782 beq-- gadBadPLock ; No, time to bail out
6783
6784 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6785 lwz r12,ppLink+4(r26) ; Get forward chain
6786 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6787 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6788 stw r11,mpAlias+4(r31) ; New mapping will head chain
6789 stw r12,ppLink+4(r26) ; Point physent to new mapping
6790 b gadFinish ; All over now...
6791
6792 gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6793 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6794 ld r12,ppLink(r26) ; Get forward chain
6795 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6796 and r12,r12,r7 ; Isolate pointer's flags
6797 or r12,r12,r31 ; Insert new mapping's address forming pointer
6798 std r11,mpAlias(r31) ; New mapping will head chain
6799 std r12,ppLink(r26) ; Point physent to new mapping
6800
6801 gadFinish: eieio ; Ensure new mapping is completely visible
6802
6803 gadRelPhy: mr r3,r26 ; r3 <- physent addr
6804 bl mapPhysUnlock ; Unlock physent chain
6805
6806 gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6807 bl sxlkUnlock ; Release host pmap search lock
6808
6809 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6810 mtmsr r25 ; Restore 'rupts, translation
6811 isync ; Throw a small wrench into the pipeline
6812 b gadPopFrame ; Nothing to do now but pop a frame and return
6813 gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6814 gadPopFrame:
6815 lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6816 ; Get caller's return address
6817 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6818 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6819 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6820 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6821 mtlr r0 ; Prepare return address
6822 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6823 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6824 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6825 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6826 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6827 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6828 lwz r1,0(r1) ; Pop stack frame
6829 blr ; Return to caller
6830
6831 gadPEMissMiss:
6832 gadBadPLock:
6833 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6834 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6835 li r3,failMapping ; The BOMB, Dmitri.
6836 sc ; The hydrogen bomb.
6837
6838
6839 ;
6840 ; Guest shadow assist -- suspend a guest mapping
6841 ;
6842 ; Suspends a guest mapping.
6843 ;
6844 ; Parameters:
6845 ; r3 : address of host pmap, 32-bit kernel virtual address
6846 ; r4 : address of guest pmap, 32-bit kernel virtual address
6847 ; r5 : guest virtual address, high-order 32 bits
6848 ; r6 : guest virtual address, low-order 32 bits
6849 ;
6850 ; Non-volatile register usage:
6851 ; r26 : VMM extension block's physical address
6852 ; r27 : host pmap physical address
6853 ; r28 : guest pmap physical address
6854 ; r29 : caller's msr image from mapSetUp
6855 ; r30 : guest virtual address
6856 ; r31 : gva->phys mapping's physical address
6857 ;
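;
; Suspension follows the same "disconnect the PTE, then mark the slot" pattern used
; elsewhere in this file: after mapInvPte32/64 invalidates the PTE, the slot's free
; bit is set in the PCA image, the mapping's cached PTE pointer is invalidated, and
; storing the PCA back unlocks the PTEG. A C sketch of that release step, with an
; illustrative stand-in for the mpHValid bit:
;
;    #include <stdint.h>
;
;    #define MP_HVALID 0x00000001u            /* illustrative stand-in for mpHValid */
;
;    /* pca    : PTEG control area image returned by the disconnect routine
;       slot   : PTE's slot number within its PTEG
;       mp_pte : mapping's cached PTE pointer word (mpPte)                    */
;    uint32_t gv_release_pte(uint32_t pca, uint32_t slot, uint32_t *mp_pte) {
;        pca |= 0x80000000u >> slot;          /* mark this PTEG slot free     */
;        *mp_pte &= ~MP_HVALID;               /* cached PTE pointer now stale */
;        /* an eieio belongs here before the PCA store, which also unlocks
;           the PTEG                                                          */
;        return pca;                          /* caller stores this to the PCA */
;    }
;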
6858
6859 .align 5
6860 .globl EXT(hw_susp_map_gv)
6861
6862 LEXT(hw_susp_map_gv)
6863
6864 #define gsuStackSize ((31-26+1)*4)+4
6865
6866 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6867 ; Mint a new stack frame
6868 mflr r0 ; Get caller's return address
6869 mfsprg r11,2 ; Get feature flags
6870 mtcrf 0x02,r11 ; Insert feature flags into cr6
6871 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6872 ; Save caller's return address
6873 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6874 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6875 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6876 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6877 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6878 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6879
6880 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6881
6882 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6883 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6884 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6885
6886 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6887 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6888 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6889 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6890 srwi r11,r30,12 ; Form shadow hash:
6891 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6892 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6893 ; Form index offset from hash page number
6894 add r31,r31,r10 ; r31 <- hash page index entry
6895 lwz r31,4(r31) ; r31 <- hash page paddr
6896 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6897 ; r31 <- hash group paddr
6898 b gsuStart ; Get to it
6899 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6900 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6901 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6902 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6903 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6904 srwi r11,r30,12 ; Form shadow hash:
6905 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6906 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6907 ; Form index offset from hash page number
6908 add r31,r31,r10 ; r31 <- hash page index entry
6909 ld r31,0(r31) ; r31 <- hash page paddr
6910 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6911 ; r31 <- hash group paddr
6912
6913 gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6914 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6915 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6916 mr r29,r11 ; Save caller's msr image
6917
6918 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6919 bl sxlkExclusive ; Get lock exclusive
6920
6921 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6922 mtctr r0 ; in this group
6923 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6924
6925 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6926 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6927 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6928 b gsu32SrchLp ; Let the search begin!
6929
6930 .align 5
6931 gsu32SrchLp:
6932 mr r6,r3 ; r6 <- current mapping slot's flags
6933 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6934 mr r7,r4 ; r7 <- current mapping slot's space ID
6935 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6936 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6937 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6938 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6939 xor r7,r7,r9 ; Compare space ID
6940 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6941 xor r8,r8,r30 ; Compare virtual address
6942 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6943 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6944
6945 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6946 bdnz gsu32SrchLp ; Iterate
6947
6948 mr r6,r3 ; r6 <- current mapping slot's flags
6949 clrrwi r5,r5,12 ; Remove flags from virtual address
6950 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6951 xor r4,r4,r9 ; Compare space ID
6952 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6953 xor r5,r5,r30 ; Compare virtual address
6954 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6955 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6956 b gsuSrchMiss ; No joy in our hash group
6957
6958 gsu64Search:
6959 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6960 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6961 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6962 b gsu64SrchLp ; Let the search begin!
6963
6964 .align 5
6965 gsu64SrchLp:
6966 mr r6,r3 ; r6 <- current mapping slot's flags
6967 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6968 mr r7,r4 ; r7 <- current mapping slot's space ID
6969 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6970 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6971 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6972 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6973 xor r7,r7,r9 ; Compare space ID
6974 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6975 xor r8,r8,r30 ; Compare virtual address
6976 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6977 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6978
6979 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6980 bdnz gsu64SrchLp ; Iterate
6981
6982 mr r6,r3 ; r6 <- current mapping slot's flags
6983 clrrdi r5,r5,12 ; Remove flags from virtual address
6984 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6985 xor r4,r4,r9 ; Compare space ID
6986 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6987 xor r5,r5,r30 ; Compare virtual address
6988 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6989 bne gsuSrchMiss ; No joy in our hash group
6990
6991 gsuSrchHit:
6992 bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6993 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6994 ; r31 <- mapping's physical address
6995 ; r3 -> PTE slot physical address
6996 ; r4 -> High-order 32 bits of PTE
6997 ; r5 -> Low-order 32 bits of PTE
6998 ; r6 -> PCA
6999 ; r7 -> PCA physical address
7000 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7001 b gsuFreePTE ; Join 64-bit path to release the PTE
7002 gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7003 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7004 gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
7005 beq- gsuNoPTE ; No valid PTE, we're almost done
7006 lis r0,0x8000 ; Prepare free bit for this slot
7007 srw r0,r0,r2 ; Position free bit
7008 or r6,r6,r0 ; Set it in our PCA image
7009 lwz r8,mpPte(r31) ; Get PTE pointer
7010 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7011 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7012 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7013 stw r6,0(r7) ; Update PCA and unlock the PTEG
7014
7015 gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7016 ori r3,r3,mpgDormant ; Mark entry dormant
7017 stw r3,mpFlags(r31) ; Save updated flags
7018 eieio ; Ensure update is visible when we unlock
7019
7020 gsuSrchMiss:
7021 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7022 bl sxlkUnlock ; Release host pmap search lock
7023
7024 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7025 mtmsr r29 ; Restore 'rupts, translation
7026 isync ; Throw a small wrench into the pipeline
7027 b gsuPopFrame ; Nothing to do now but pop a frame and return
7028 gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7029 gsuPopFrame:
7030 lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7031 ; Get caller's return address
7032 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7033 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7034 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7035 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7036 mtlr r0 ; Prepare return address
7037 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7038 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7039 lwz r1,0(r1) ; Pop stack frame
7040 blr ; Return to caller
7041
7042 ;
7043 ; Guest shadow assist -- test guest mapping reference and change bits
7044 ;
7045 ; Locates the specified guest mapping, and if it exists gathers its reference
7046 ; and change bits, optionally resetting them.
7047 ;
7048 ; Parameters:
7049 ; r3 : address of host pmap, 32-bit kernel virtual address
7050 ; r4 : address of guest pmap, 32-bit kernel virtual address
7051 ; r5 : guest virtual address, high-order 32 bits
7052 ; r6 : guest virtual address, low-order 32 bits
7053 ; r7 : reset boolean
7054 ;
7055 ; Non-volatile register usage:
7056 ; r24 : VMM extension block's physical address
7057 ; r25 : return code (w/reference and change bits)
7058 ; r26 : reset boolean
7059 ; r27 : host pmap physical address
7060 ; r28 : guest pmap physical address
7061 ; r29 : caller's msr image from mapSetUp
7062 ; r30 : guest virtual address
7063 ; r31 : gva->phys mapping's physical address
7064 ;
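;
; Once the mapping is found and its PTE disconnected, gathering and optionally
; resetting the reference/change bits is a small read-modify-write on the PTE image
; and on the mapping's cached copy. A C sketch, with illustrative bit positions
; standing in for mpR and mpC:
;
;    #include <stdint.h>
;
;    #define MP_R 0x00000100u                 /* illustrative stand-in for mpR */
;    #define MP_C 0x00000080u                 /* illustrative stand-in for mpC */
;
;    uint32_t gv_test_rc(uint32_t *map_vlo, uint32_t *pte_lo, int reset) {
;        uint32_t rc = *pte_lo & (MP_R | MP_C);   /* gather current R and C    */
;        if (reset) {
;            *map_vlo &= ~(MP_R | MP_C);          /* clear mapping's copy      */
;            *pte_lo  &= ~(MP_R | MP_C);          /* clear PTE copy (restored
;                                                    when the PTE revalidates) */
;        }
;        return rc;                               /* becomes the return code   */
;    }
;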
7065
7066 .align 5
7067 .globl EXT(hw_test_rc_gv)
7068
7069 LEXT(hw_test_rc_gv)
7070
7071 #define gtdStackSize ((31-24+1)*4)+4
7072
7073 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7074 ; Mint a new stack frame
7075 mflr r0 ; Get caller's return address
7076 mfsprg r11,2 ; Get feature flags
7077 mtcrf 0x02,r11 ; Insert feature flags into cr6
7078 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7079 ; Save caller's return address
7080 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7081 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7082 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7083 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7084 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7085 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7086 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7087 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7088
7089 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7090
7091 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7092 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
7093
7094 bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
7095
7096 lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
7097 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
7098 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
7099 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7100 srwi r11,r30,12 ; Form shadow hash:
7101 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7102 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7103 ; Form index offset from hash page number
7104 add r31,r31,r10 ; r31 <- hash page index entry
7105 lwz r31,4(r31) ; r31 <- hash page paddr
7106 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7107 ; r31 <- hash group paddr
7108 b gtdStart ; Get to it
7109
7110 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7111 ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7112 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
7113 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
7114 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7115 srwi r11,r30,12 ; Form shadow hash:
7116 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7117 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7118 ; Form index offset from hash page number
7119 add r31,r31,r10 ; r31 <- hash page index entry
7120 ld r31,0(r31) ; r31 <- hash page paddr
7121 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7122 ; r31 <- hash group paddr
7123
7124 gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
7125 xor r28,r4,r28 ; Convert guest pmap_t virt->real
7126 mr r26,r7 ; Save reset boolean
7127 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7128 mr r29,r11 ; Save caller's msr image
7129
7130 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7131 bl sxlkExclusive ; Get lock exclusive
7132
7133 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7134 mtctr r0 ; in this group
7135 bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
7136
7137 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7138 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7139 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7140 b gtd32SrchLp ; Let the search begin!
7141
7142 .align 5
7143 gtd32SrchLp:
7144 mr r6,r3 ; r6 <- current mapping slot's flags
7145 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7146 mr r7,r4 ; r7 <- current mapping slot's space ID
7147 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7148 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7149 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7150 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7151 xor r7,r7,r9 ; Compare space ID
7152 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7153 xor r8,r8,r30 ; Compare virtual address
7154 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7155 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7156
7157 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7158 bdnz gtd32SrchLp ; Iterate
7159
7160 mr r6,r3 ; r6 <- current mapping slot's flags
7161 clrrwi r5,r5,12 ; Remove flags from virtual address
7162 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7163 xor r4,r4,r9 ; Compare space ID
7164 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7165 xor r5,r5,r30 ; Compare virtual address
7166 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7167 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7168 b gtdSrchMiss ; No joy in our hash group
7169
7170 gtd64Search:
7171 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7172 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7173 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7174 b gtd64SrchLp ; Let the search begin!
7175
7176 .align 5
7177 gtd64SrchLp:
7178 mr r6,r3 ; r6 <- current mapping slot's flags
7179 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7180 mr r7,r4 ; r7 <- current mapping slot's space ID
7181 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7182 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7183 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7184 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7185 xor r7,r7,r9 ; Compare space ID
7186 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7187 xor r8,r8,r30 ; Compare virtual address
7188 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7189 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7190
7191 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7192 bdnz gtd64SrchLp ; Iterate
7193
7194 mr r6,r3 ; r6 <- current mapping slot's flags
7195 clrrdi r5,r5,12 ; Remove flags from virtual address
7196 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7197 xor r4,r4,r9 ; Compare space ID
7198 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7199 xor r5,r5,r30 ; Compare virtual address
7200 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7201 bne gtdSrchMiss ; No joy in our hash group
7202
7203 gtdSrchHit:
7204 bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
7205
7206 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
7207
7208 cmplwi cr1,r26,0 ; Do we want to clear RC?
7209 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7210 mr. r3,r3 ; Was there a previously valid PTE?
7211 li r0,lo16(mpR|mpC) ; Get bits to clear
7212
7213 and r25,r5,r0 ; Copy RC bits into result
7214 beq++ cr1,gtdNoClr32 ; Nope...
7215
7216 andc r12,r12,r0 ; Clear mapping copy of RC
7217 andc r5,r5,r0 ; Clear PTE copy of RC
7218 sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
7219
7220 gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
7221
7222 sth r5,6(r3) ; Store updated RC in PTE
7223 eieio ; Make sure we do not reorder
7224 stw r4,0(r3) ; Revalidate the PTE
7225
7226 eieio ; Make sure all updates come first
7227 stw r6,0(r7) ; Unlock PCA
7228
7229 gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7230 bl sxlkUnlock ; Unlock the search list
7231 b gtdR32 ; Join common...
7232
7233 .align 5
7234
7235
7236 gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
7237
7238 cmplwi cr1,r26,0 ; Do we want to clear RC?
7239 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7240 mr. r3,r3 ; Was there a previously valid PTE?
7241 li r0,lo16(mpR|mpC) ; Get bits to clear
7242
7243 and r25,r5,r0 ; Copy RC bits into result
7244 beq++ cr1,gtdNoClr64 ; Nope...
7245
7246 andc r12,r12,r0 ; Clear mapping copy of RC
7247 andc r5,r5,r0 ; Clear PTE copy of RC
7248 sth r12,mpVAddr+6(r31) ; Set the new RC
7249
7250 gtdNoClr64: beq-- gtdNoOld64 ; Nope, no previous pte...
7251
7252 sth r5,14(r3) ; Store updated RC
7253 eieio ; Make sure we do not reorder
7254 std r4,0(r3) ; Revalidate the PTE
7255
7256 eieio ; Make sure all updates come first
7257 stw r6,0(r7) ; Unlock PCA
7258
7259 gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7260 bl sxlkUnlock ; Unlock the search list
7261 b gtdR64 ; Join common...
7262
7263 gtdSrchMiss:
7264 la r3,pmapSXlk(r27) ; Point to the pmap search lock
7265 bl sxlkUnlock ; Unlock the search list
7266 li r25,mapRtNotFnd ; Get ready to return not found
7267 bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
7268
7269 gtdR32: mtmsr r29 ; Restore caller's msr image
7270 isync
7271 b gtdEpilog
7272
7273 gtdR64: mtmsrd r29 ; Restore caller's msr image
7274
7275 gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7276 ; Get caller's return address
7277 mr r3,r25 ; Get return code
7278 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7279 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7280 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7281 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7282 mtlr r0 ; Prepare return address
7283 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7284 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7285 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7286 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7287 lwz r1,0(r1) ; Pop stack frame
7288 blr ; Return to caller
7289
7290 ;
7291 ; Guest shadow assist -- convert guest to host virtual address
7292 ;
7293 ; Locates the specified guest mapping, and if it exists, locates the
7294 ; first mapping belonging to its host on the physical chain and returns
7295 ; its virtual address.
7296 ;
7297 ; Note that if there are multiple mappings belonging to this host
7298 ; chained to the physent to which the guest mapping is chained, then
7299 ; host virtual aliases exist for this physical address. If host aliases
7300 ; exist, then we select the first on the physent chain, making it
7301 ; unpredictable which of the two or more possible host virtual addresses
7302 ; will be returned.
7303 ;
7304 ; Parameters:
7305 ; r3 : address of guest pmap, 32-bit kernel virtual address
7306 ; r4 : guest virtual address, high-order 32 bits
7307 ; r5 : guest virtual address, low-order 32 bits
7308 ;
7309 ; Non-volatile register usage:
7310 ; r24 : physent physical address
7311 ; r25 : VMM extension block's physical address
7312 ; r26 : host virtual address
7313 ; r27 : host pmap physical address
7314 ; r28 : guest pmap physical address
7315 ; r29 : caller's msr image from mapSetUp
7316 ; r30 : guest virtual address
7317 ; r31 : gva->phys mapping's physical address
7318 ;
7319
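;
; Roughly, in C, the host-alias scan done once the guest mapping is found
; looks like the sketch below. The types and names (mapping_t, physent_t,
; first_host_alias) are illustrative stand-ins, not the kernel's own
; definitions.
;
;     #include <stdint.h>
;     #include <stddef.h>
;
;     typedef struct mapping {
;         struct mapping *alias;      /* next mapping chained to the physent  */
;         uint16_t        space;      /* address space ID of this mapping     */
;         unsigned        type;       /* 0 here stands for a normal mapping   */
;         uint64_t        vaddr;      /* virtual address plus low-order flags */
;     } mapping_t;
;
;     typedef struct physent { mapping_t *first; } physent_t;
;
;     /* Return the first host-space alias chained to the physent, or
;        all-ones (the 64-bit -1 returned on a miss) if none exists.  */
;     static uint64_t first_host_alias(physent_t *pe, uint16_t host_space) {
;         for (mapping_t *mp = pe->first; mp != NULL; mp = mp->alias) {
;             if (mp->type == 0 && mp->space == host_space)
;                 return mp->vaddr & ~0xFFFULL;       /* strip flag bits */
;         }
;         return ~0ULL;
;     }
;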
7320 .align 5
7321 .globl EXT(hw_gva_to_hva)
7322
7323 LEXT(hw_gva_to_hva)
7324
7325 #define gthStackSize ((31-24+1)*4)+4
7326
7327 stwu r1,-(FM_ALIGN(gthStackSize)+FM_SIZE)(r1)
7328 ; Mint a new stack frame
7329 mflr r0 ; Get caller's return address
7330 mfsprg r11,2 ; Get feature flags
7331 mtcrf 0x02,r11 ; Insert feature flags into cr6
7332 stw r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7333 ; Save caller's return address
7334 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7335 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7336 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7337 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7338 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7339 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7340 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7341 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7342
7343 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7344
7345 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7346 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7347
7348 bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
7349
7350 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7351 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7352 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7353 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7354 srwi r11,r30,12 ; Form shadow hash:
7355 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7356 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7357 ; Form index offset from hash page number
7358 add r31,r31,r10 ; r31 <- hash page index entry
7359 lwz r31,4(r31) ; r31 <- hash page paddr
7360 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7361 ; r31 <- hash group paddr
7362 b gthStart ; Get to it
7363
7364 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7365 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7366 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7367 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7368 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7369 srwi r11,r30,12 ; Form shadow hash:
7370 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7371 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7372 ; Form index offset from hash page number
7373 add r31,r31,r10 ; r31 <- hash page index entry
7374 ld r31,0(r31) ; r31 <- hash page paddr
7375 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7376 ; r31 <- hash group paddr
7377
7378 gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7379 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7380 mr r29,r11 ; Save caller's msr image
7381
7382 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7383 bl sxlkExclusive ; Get lock exclusive
7384
7385 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7386 mtctr r0 ; in this group
7387 bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
7388
7389 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7390 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7391 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7392 b gth32SrchLp ; Let the search begin!
7393
7394 .align 5
7395 gth32SrchLp:
7396 mr r6,r3 ; r6 <- current mapping slot's flags
7397 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7398 mr r7,r4 ; r7 <- current mapping slot's space ID
7399 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7400 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7401 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7402 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7403 xor r7,r7,r9 ; Compare space ID
7404 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7405 xor r8,r8,r30 ; Compare virtual address
7406 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7407 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7408
7409 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7410 bdnz gth32SrchLp ; Iterate
7411
7412 mr r6,r3 ; r6 <- current mapping slot's flags
7413 clrrwi r5,r5,12 ; Remove flags from virtual address
7414 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7415 xor r4,r4,r9 ; Compare space ID
7416 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7417 xor r5,r5,r30 ; Compare virtual address
7418 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7419 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7420 b gthSrchMiss ; No joy in our hash group
7421
7422 gth64Search:
7423 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7424 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7425 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7426 b gth64SrchLp ; Let the search begin!
7427
7428 .align 5
7429 gth64SrchLp:
7430 mr r6,r3 ; r6 <- current mapping slot's flags
7431 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7432 mr r7,r4 ; r7 <- current mapping slot's space ID
7433 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7434 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7435 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7436 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7437 xor r7,r7,r9 ; Compare space ID
7438 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7439 xor r8,r8,r30 ; Compare virtual address
7440 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7441 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7442
7443 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7444 bdnz gth64SrchLp ; Iterate
7445
7446 mr r6,r3 ; r6 <- current mapping slot's flags
7447 clrrdi r5,r5,12 ; Remove flags from virtual address
7448 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7449 xor r4,r4,r9 ; Compare space ID
7450 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7451 xor r5,r5,r30 ; Compare virtual address
7452 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7453 bne gthSrchMiss ; No joy in our hash group
7454
7455 gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
7456 bl mapFindLockPN ; Find 'n' lock this page's physent
7457 mr. r24,r3 ; Got lock on our physent?
7458 beq-- gthBadPLock ; No, time to bail out
7459
7460 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7461
7462 lwz r9,ppLink+4(r24) ; Get first mapping on physent
7463 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7464 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
7465 gthPELoop: mr. r12,r9 ; Got a mapping to look at?
7466 beq- gthPEMiss ; Nope, we've missed hva->phys mapping
7467 lwz r7,mpFlags(r12) ; Get mapping's flags
7468 lhz r4,mpSpace(r12) ; Get mapping's space id number
7469 lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
7470 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
7471
7472 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7473 rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
7474 xori r0,r0,mpNormal ; Normal mapping?
7475 xor r4,r4,r6 ; Compare w/ host space id number
7476 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7477 beq gthPEHit ; Hit
7478 b gthPELoop ; Iterate
7479
7480 gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
7481 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
7482 ld r9,ppLink(r24) ; Get first mapping on physent
7483 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7484 andc r9,r9,r0 ; Cleanup mapping pointer
7485 gthPELp64: mr. r12,r9 ; Got a mapping to look at?
7486 beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
7487 lwz r7,mpFlags(r12) ; Get mapping's flags
7488 lhz r4,mpSpace(r12) ; Get mapping's space id number
7489 ld r26,mpVAddr(r12) ; Get mapping's virtual address
7490 ld r9,mpAlias(r12) ; Next mapping physent alias chain
7491 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7492 rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
7493 xori r0,r0,mpNormal ; Normal mapping?
7494 xor r4,r4,r6 ; Compare w/ host space id number
7495 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7496 beq gthPEHit ; Hit
7497 b gthPELp64 ; Iterate
7498
7499 .align 5
7500 gthPEMiss: mr r3,r24 ; Get physent's address
7501 bl mapPhysUnlock ; Unlock physent chain
7502 gthSrchMiss:
7503 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7504 bl sxlkUnlock ; Release host pmap search lock
7505 li r3,-1 ; Return 64-bit -1
7506 li r4,-1
7507 bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
7508 b gthEpi32 ; Take 32-bit exit
7509
7510 .align 5
7511 gthPEHit: mr r3,r24 ; Get physent's address
7512 bl mapPhysUnlock ; Unlock physent chain
7513 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7514 bl sxlkUnlock ; Release host pmap search lock
7515
7516 bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
7517
7518 gthR32: li r3,0 ; High-order 32 bits host virtual address
7519 mr r4,r26 ; Low-order 32 bits host virtual address
7520 gthEpi32: mtmsr r29 ; Restore caller's msr image
7521 isync
7522 b gthEpilog
7523
7524 .align 5
7525 gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
7526 clrldi r4,r26,32 ; Low-order 32 bits host virtual address
7527 gthEpi64: mtmsrd r29 ; Restore caller's msr image
7528
7529 gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7530 ; Get caller's return address
7531 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7532 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7533 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7534 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7535 mtlr r0 ; Prepare return address
7536 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7537 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7538 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7539 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7540 lwz r1,0(r1) ; Pop stack frame
7541 blr ; Return to caller
7542
7543 gthBadPLock:
7544 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
7545 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7546 li r3,failMapping ; The BOMB, Dmitri.
7547 sc ; The hydrogen bomb.
7548
7549
7550 ;
7551 ; Guest shadow assist -- find a guest mapping
7552 ;
7553 ; Locates the specified guest mapping, and if it exists, returns a copy
7554 ; of it.
7555 ;
7556 ; Parameters:
7557 ; r3 : address of guest pmap, 32-bit kernel virtual address
7558 ; r4 : guest virtual address, high-order 32 bits
7559 ; r5 : guest virtual address, low-order 32 bits
7560 ; r6 : 32 byte copy area, 32-bit kernel virtual address
7561 ;
7562 ; Non-volatile register usage:
7563 ; r25 : VMM extension block's physical address
7564 ; r26 : copy area virtual address
7565 ; r27 : host pmap physical address
7566 ; r28 : guest pmap physical address
7567 ; r29 : caller's msr image from mapSetUp
7568 ; r30 : guest virtual address
7569 ; r31 : gva->phys mapping's physical address
7570 ;
7571
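;
; The shadow-hash probe used here (and by the other guest-shadow routines)
; can be pictured in C as below. The GV_* geometry and the slot layout are
; placeholders; gv_hash and gv_group_lookup are illustrative names only.
;
;     #include <stdint.h>
;
;     /* Hash a (space ID, guest vaddr) pair; the group is then picked out of
;        this value with the GV_HPAGE/GV_HGRP shift-and-mask steps.          */
;     static uint32_t gv_hash(uint32_t space_id, uint64_t gvaddr) {
;         return (uint32_t)(gvaddr >> 12) ^ space_id;     /* spaceID ^ (vaddr >> 12) */
;     }
;
;     /* Scan the slots of one hash group for an exact, live match. */
;     static int gv_group_lookup(const uint64_t *slot_vaddr, const uint16_t *slot_space,
;                                const uint32_t *slot_flags, int nslots,
;                                uint32_t space_id, uint64_t gvaddr) {
;         for (int i = 0; i < nslots; i++) {
;             if (slot_flags[i] & 0x3u)                   /* assumed free/dormant bits */
;                 continue;
;             if (slot_space[i] == space_id &&
;                 (slot_vaddr[i] & ~0xFFFULL) == (gvaddr & ~0xFFFULL))
;                 return i;                               /* hit */
;         }
;         return -1;                                      /* miss: not in this group */
;     }
;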
7572 .align 5
7573 .globl EXT(hw_find_map_gv)
7574
7575 LEXT(hw_find_map_gv)
7576
7577 #define gfmStackSize ((31-25+1)*4)+4
7578
7579 stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
7580 ; Mint a new stack frame
7581 mflr r0 ; Get caller's return address
7582 mfsprg r11,2 ; Get feature flags
7583 mtcrf 0x02,r11 ; Insert feature flags into cr6
7584 stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7585 ; Save caller's return address
7586 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7587 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7588 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7589 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7590 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7591 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7592 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7593
7594 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7595 mr r26,r6 ; Copy copy buffer vaddr
7596
7597 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7598 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7599
7600 bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
7601
7602 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7603 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7604 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7605 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7606 srwi r11,r30,12 ; Form shadow hash:
7607 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7608 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7609 ; Form index offset from hash page number
7610 add r31,r31,r10 ; r31 <- hash page index entry
7611 lwz r31,4(r31) ; r31 <- hash page paddr
7612 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7613 ; r31 <- hash group paddr
7614 b gfmStart ; Get to it
7615
7616 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7617 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7618 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7619 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7620 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7621 srwi r11,r30,12 ; Form shadow hash:
7622 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7623 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7624 ; Form index offset from hash page number
7625 add r31,r31,r10 ; r31 <- hash page index entry
7626 ld r31,0(r31) ; r31 <- hash page paddr
7627 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7628 ; r31 <- hash group paddr
7629
7630 gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7631 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7632 mr r29,r11 ; Save caller's msr image
7633
7634 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7635 bl sxlkExclusive ; Get lock exclusive
7636
7637 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7638 mtctr r0 ; in this group
7639 bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
7640
7641 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7642 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7643 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7644 b gfm32SrchLp ; Let the search begin!
7645
7646 .align 5
7647 gfm32SrchLp:
7648 mr r6,r3 ; r6 <- current mapping slot's flags
7649 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7650 mr r7,r4 ; r7 <- current mapping slot's space ID
7651 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7652 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7653 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7654 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7655 xor r7,r7,r9 ; Compare space ID
7656 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7657 xor r8,r8,r30 ; Compare virtual address
7658 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7659 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7660
7661 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7662 bdnz gfm32SrchLp ; Iterate
7663
7664 mr r6,r3 ; r6 <- current mapping slot's flags
7665 clrrwi r5,r5,12 ; Remove flags from virtual address
7666 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7667 xor r4,r4,r9 ; Compare space ID
7668 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7669 xor r5,r5,r30 ; Compare virtual address
7670 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7671 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7672 b gfmSrchMiss ; No joy in our hash group
7673
7674 gfm64Search:
7675 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7676 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7677 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7678 b gfm64SrchLp ; Let the search begin!
7679
7680 .align 5
7681 gfm64SrchLp:
7682 mr r6,r3 ; r6 <- current mapping slot's flags
7683 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7684 mr r7,r4 ; r7 <- current mapping slot's space ID
7685 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7686 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7687 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7688 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7689 xor r7,r7,r9 ; Compare space ID
7690 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7691 xor r8,r8,r30 ; Compare virtual address
7692 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7693 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7694
7695 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7696 bdnz gfm64SrchLp ; Iterate
7697
7698 mr r6,r3 ; r6 <- current mapping slot's flags
7699 clrrdi r5,r5,12 ; Remove flags from virtual address
7700 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7701 xor r4,r4,r9 ; Compare space ID
7702 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7703 xor r5,r5,r30 ; Compare virtual address
7704 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7705 bne gfmSrchMiss ; No joy in our hash group
7706
7707 gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
7708 lwz r6,4(r31) ; +4
7709 lwz r7,8(r31) ; +8
7710 lwz r8,12(r31) ; +12
7711 lwz r9,16(r31) ; +16
7712 lwz r10,20(r31) ; +20
7713 lwz r11,24(r31) ; +24
7714 lwz r12,28(r31) ; +28
7715
7716 li r31,mapRtOK ; Return found mapping
7717
7718 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7719 bl sxlkUnlock ; Release host pmap search lock
7720
7721 bt++ pf64Bitb,gfmEpi64 ; Test for 64-bit machine
7722
7723 gfmEpi32: mtmsr r29 ; Restore caller's msr image
7724 isync ; A small wrench
7725 b gfmEpilog ; and a larger bubble
7726
7727 .align 5
7728 gfmEpi64: mtmsrd r29 ; Restore caller's msr image
7729
7730 gfmEpilog: mr. r3,r31 ; Copy/test mapping address
7731 beq gfmNotFound ; Skip copy if no mapping found
7732
7733 stw r5,0(r26) ; Store 32 bytes of mapping into virtual
7734 stw r6,4(r26) ; +4
7735 stw r7,8(r26) ; +8
7736 stw r8,12(r26) ; +12
7737 stw r9,16(r26) ; +16
7738 stw r10,20(r26) ; +20
7739 stw r11,24(r26) ; +24
7740 stw r12,28(r26) ; +28
7741
7742 gfmNotFound:
7743 lwz r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7744 ; Get caller's return address
7745 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7746 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7747 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7748 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7749 mtlr r0 ; Prepare return address
7750 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7751 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7752 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7753 lwz r1,0(r1) ; Pop stack frame
7754 blr ; Return to caller
7755
7756 .align 5
7757 gfmSrchMiss:
7758 li r31,mapRtNotFnd ; Indicate mapping not found
7759 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7760 bl sxlkUnlock ; Release host pmap search lock
7761 bt++ pf64Bitb,gfmEpi64 ; Take 64-bit exit
7762 b gfmEpi32 ; Take 32-bit exit
7763
7764
7765 ;
7766 ; Guest shadow assist -- change guest page protection
7767 ;
7768 ; Locates the specified guest mapping, and if it exists, changes its
7769 ; protection.
7770 ;
7771 ; Parameters:
7772 ; r3 : address of guest pmap, 32-bit kernel virtual address
7773 ; r4 : guest virtual address, high-order 32 bits
7774 ; r5 : guest virtual address, low-order 32 bits
7775 ; r6 : guest mapping protection code
7776 ;
7777 ; Non-volatile register usage:
7778 ; r25 : caller's msr image from mapSetUp
7779 ; r26 : guest mapping protection code
7780 ; r27 : host pmap physical address
7781 ; r28 : guest pmap physical address
7782 ; r29 : VMM extension block's physical address
7783 ; r30 : guest virtual address
7784 ; r31 : gva->phys mapping's physical address
7785 ;
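;
; The protection rewrite itself is tiny; in C it would look like the sketch
; below (mpPP's width and position are left abstract as pp_mask, an assumed
; parameter):
;
;     #include <stdint.h>
;
;     /* Once the old PTE (if any) is invalidated, splice the new protection
;        code into the mapping's low vaddr word, leaving the other bits.    */
;     static void gv_set_prot(uint32_t *vaddr_lo, uint32_t prot, uint32_t pp_mask) {
;         *vaddr_lo = (*vaddr_lo & ~pp_mask) | (prot & pp_mask);
;     }
;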
7786 .align 5
7787 .globl EXT(hw_protect_gv)
7788
7789 LEXT(hw_protect_gv)
7790
7791 #define gcpStackSize ((31-24+1)*4)+4
7792
7793 stwu r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
7794 ; Mint a new stack frame
7795 mflr r0 ; Get caller's return address
7796 mfsprg r11,2 ; Get feature flags
7797 mtcrf 0x02,r11 ; Insert feature flags into cr6
7798 stw r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7799 ; Save caller's return address
7800 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7801 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7802 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7803 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7804 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7805 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7806 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7807
7808 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7809 mr r26,r6 ; Copy guest mapping protection code
7810
7811 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7812 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7813 bt++ pf64Bitb,gcp64Salt ; Handle 64-bit machine separately
7814 lwz r29,pmapVmmExtPhys+4(r3) ; r29 <- VMM pmap extension block paddr
7815 lwz r27,vmxHostPmapPhys+4(r11) ; r27 <- host pmap paddr
7816 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7817 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7818 srwi r11,r30,12 ; Form shadow hash:
7819 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7820 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7821 ; Form index offset from hash page number
7822 add r31,r31,r10 ; r31 <- hash page index entry
7823 lwz r31,4(r31) ; r31 <- hash page paddr
7824 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7825 ; r31 <- hash group paddr
7826 b gcpStart ; Get to it
7827
7828 gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7829 ld r29,pmapVmmExtPhys(r3) ; r29 <- VMM pmap extension block paddr
7830 ld r27,vmxHostPmapPhys(r11) ; r27 <- host pmap paddr
7831 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7832 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7833 srwi r11,r30,12 ; Form shadow hash:
7834 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7835 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7836 ; Form index offset from hash page number
7837 add r31,r31,r10 ; r31 <- hash page index entry
7838 ld r31,0(r31) ; r31 <- hash page paddr
7839 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7840 ; r31 <- hash group paddr
7841
7842 gcpStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7843 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7844 mr r25,r11 ; Save caller's msr image
7845
7846 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7847 bl sxlkExclusive ; Get lock exclusive
7848
7849 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7850 mtctr r0 ; in this group
7851 bt++ pf64Bitb,gcp64Search ; Test for 64-bit machine
7852
7853 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7854 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7855 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7856 b gcp32SrchLp ; Let the search begin!
7857
7858 .align 5
7859 gcp32SrchLp:
7860 mr r6,r3 ; r6 <- current mapping slot's flags
7861 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7862 mr r7,r4 ; r7 <- current mapping slot's space ID
7863 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7864 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7865 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7866 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7867 xor r7,r7,r9 ; Compare space ID
7868 or r0,r11,r7 ; r0 <- free || dormant || !space match
7869 xor r8,r8,r30 ; Compare virtual address
7870 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7871 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7872
7873 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7874 bdnz gcp32SrchLp ; Iterate
7875
7876 mr r6,r3 ; r6 <- current mapping slot's flags
7877 clrrwi r5,r5,12 ; Remove flags from virtual address
7878 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7879 xor r4,r4,r9 ; Compare space ID
7880 or r0,r11,r4 ; r0 <- free || dormant || !space match
7881 xor r5,r5,r30 ; Compare virtual address
7882 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7883 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7884 b gcpSrchMiss ; No joy in our hash group
7885
7886 gcp64Search:
7887 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7888 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7889 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7890 b gcp64SrchLp ; Let the search begin!
7891
7892 .align 5
7893 gcp64SrchLp:
7894 mr r6,r3 ; r6 <- current mapping slot's flags
7895 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7896 mr r7,r4 ; r7 <- current mapping slot's space ID
7897 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7898 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7899 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7900 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7901 xor r7,r7,r9 ; Compare space ID
7902 or r0,r11,r7 ; r0 <- free || dormant || !space match
7903 xor r8,r8,r30 ; Compare virtual address
7904 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7905 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7906
7907 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7908 bdnz gcp64SrchLp ; Iterate
7909
7910 mr r6,r3 ; r6 <- current mapping slot's flags
7911 clrrdi r5,r5,12 ; Remove flags from virtual address
7912 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7913 xor r4,r4,r9 ; Compare space ID
7914 or r0,r11,r4 ; r0 <- free || dormant || !space match
7915 xor r5,r5,r30 ; Compare virtual address
7916 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7917 bne gcpSrchMiss ; No joy in our hash group
7918
7919 gcpSrchHit:
7920 bt++ pf64Bitb,gcpDscon64 ; Handle 64-bit disconnect separately
7921 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
7922 ; r31 <- mapping's physical address
7923 ; r3 -> PTE slot physical address
7924 ; r4 -> High-order 32 bits of PTE
7925 ; r5 -> Low-order 32 bits of PTE
7926 ; r6 -> PCA
7927 ; r7 -> PCA physical address
7928 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7929 b gcpFreePTE ; Join 64-bit path to release the PTE
7930 gcpDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7931 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7932 gcpFreePTE: mr. r3,r3 ; Was there a valid PTE?
7933 beq- gcpSetKey ; No valid PTE, we're almost done
7934 lis r0,0x8000 ; Prepare free bit for this slot
7935 srw r0,r0,r2 ; Position free bit
7936 or r6,r6,r0 ; Set it in our PCA image
7937 lwz r8,mpPte(r31) ; Get PTE pointer
7938 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7939 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7940 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7941 stw r6,0(r7) ; Update PCA and unlock the PTEG
7942
7943 gcpSetKey: lwz r0,mpVAddr+4(r31) ; Get va word containing protection bits
7944 rlwimi r0,r26,0,mpPP ; Insert new protection bits
7945 stw r0,mpVAddr+4(r31) ; Write 'em back
7946 eieio ; Ensure previous mapping updates are visible
7947 li r31,mapRtOK ; I'm a success
7948
7949 gcpRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7950 bl sxlkUnlock ; Release host pmap search lock
7951
7952 mr r3,r31 ; r3 <- result code
7953 bt++ pf64Bitb,gcpRtn64 ; Handle 64-bit separately
7954 mtmsr r25 ; Restore 'rupts, translation
7955 isync ; Throw a small wrench into the pipeline
7956 b gcpPopFrame ; Nothing to do now but pop a frame and return
7957 gcpRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
7958 gcpPopFrame:
7959 lwz r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7960 ; Get caller's return address
7961 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7962 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7963 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7964 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7965 mtlr r0 ; Prepare return address
7966 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7967 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7968 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7969 lwz r1,0(r1) ; Pop stack frame
7970 blr ; Return to caller
7971
7972 .align 5
7973 gcpSrchMiss:
7974 li r31,mapRtNotFnd ; Could not locate requested mapping
7975 b gcpRelPmap ; Exit through host pmap search lock release
7976
7977
7978 ;
7979 ; Find the physent based on a physical page and try to lock it (but not too hard)
7980 ; Note that this table always has an entry with a 0 table pointer at the end
7981 ;
7982 ; R3 contains ppnum on entry
7983 ; R3 is 0 if no entry was found
7984 ; R3 is physent if found
7985 ; cr0_eq is true if lock was obtained or there was no entry to lock
7986 ; cr0_eq is false if there was an entry and it was locked
7987 ;
7988
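;
; The table walk can be read as the following C sketch; the region table
; fields are approximations (names assumed), and each physent is taken to be
; 8 bytes, matching the shift-by-3 below.
;
;     #include <stdint.h>
;     #include <stddef.h>
;
;     struct mem_region { uint32_t *phys_tab; uint32_t start, end; };
;
;     /* Find the physent for a physical page number, or NULL if the page is
;        not covered by any region (the terminating entry has phys_tab == 0). */
;     static uint32_t *find_physent(struct mem_region *tab, uint32_t ppnum) {
;         for (; tab->phys_tab != NULL; tab++) {
;             if (ppnum >= tab->start && ppnum <= tab->end)
;                 return tab->phys_tab + 2 * (ppnum - tab->start);
;         }
;         return NULL;
;     }
;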
7989 .align 5
7990
7991 mapFindPhyTry:
7992 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7993 mr r2,r3 ; Save our target
7994 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7995
7996 mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
7997 lwz r5,mrStart(r9) ; Get start of table entry
7998 lwz r0,mrEnd(r9) ; Get end of table entry
7999 addi r9,r9,mrSize ; Point to the next slot
8000 cmplwi cr2,r3,0 ; Are we at the end of the table?
8001 cmplw r2,r5 ; See if we are in this table
8002 cmplw cr1,r2,r0 ; Check end also
8003 sub r4,r2,r5 ; Calculate index to physical entry
8004 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
8005 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
8006 slwi r4,r4,3 ; Get offset to physical entry
8007
8008 blt-- mapFindPhz ; Did not find it...
8009
8010 add r3,r3,r4 ; Point right to the slot
8011
8012 mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
8013 rlwinm. r0,r2,0,0,0 ; Is it locked?
8014 bnelr-- ; Yes it is...
8015
8016 lwarx r2,0,r3 ; Get the lock
8017 rlwinm. r0,r2,0,0,0 ; Is it locked?
8018 oris r0,r2,0x8000 ; Set the lock bit
8019 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
8020 stwcx. r0,0,r3 ; Try to stuff it back...
8021 bne-- mapFindOv ; Collision, try again...
8022 isync ; Clear any speculations
8023 blr ; Leave...
8024
8025 mapFindKl: li r2,lgKillResv ; Killing field
8026 stwcx. r2,0,r2 ; Trash reservation...
8027 crclr cr0_eq ; Make sure we do not think we got the lock
8028 blr ; Leave...
8029
8030 mapFindNo: crset cr0_eq ; Make sure that we set this
8031 li r3,0 ; Show that we did not find it
8032 blr ; Leave...
8033 ;
8034 ; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
8035 ;
8036 ; How the pmap cache lookup works:
8037 ;
8038 ; We use a combination of three things: a mask of valid entries, a sub-tag, and the
8039 ; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
8040 ; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
8041 ; of the ESID, bits 32:35 of the effective address for 64-bit and 0:3 for 32-bit. The cache
8042 ; entry contains the full 36 bit ESID.
8043 ;
8044 ; The purpose of the sub-tag is to limit the number of searches necessary when looking
8045 ; for an existing cache entry. Because there are 16 slots in the cache, we could end up
8046 ; searching all 16 if a match is not found.
8047 ;
8048 ; Essentially, we will search only the slots that have a valid entry and whose sub-tag
8049 ; matches. More than likely, we will eliminate almost all of the searches.
8050 ;
8051 ; Inputs:
8052 ; R3 = pmap
8053 ; R4 = ESID high half
8054 ; R5 = ESID low half
8055 ;
8056 ; Outputs:
8057 ; R3 = pmap cache slot if found, 0 if not
8058 ; R10 = pmapCCtl address
8059 ; R11 = pmapCCtl image
8060 ; pmapCCtl locked on exit
8061 ;
8062
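;
; Written sequentially in C, the lookup is the sketch below; the real code
; does all 16 sub-tag compares at once with the bit tricks that follow, and
; the names here are illustrative only.
;
;     #include <stdint.h>
;
;     #define SEG_CACHE_SLOTS 16
;
;     /* Return the first valid slot whose 4-bit sub-tag and full ESID both
;        match, or -1 if the ESID is not cached.                            */
;     static int pmap_cache_lookup(uint32_t valid_mask, const uint8_t *sub_tag,
;                                  const uint64_t *esid, uint64_t target_esid) {
;         uint8_t want = (uint8_t)(target_esid & 0xF);    /* low 4 bits of ESID */
;         for (int slot = 0; slot < SEG_CACHE_SLOTS; slot++) {
;             if (!(valid_mask & (1u << slot)))
;                 continue;                               /* slot not in use    */
;             if (sub_tag[slot] != want)
;                 continue;                               /* cheap filter first */
;             if (esid[slot] == target_esid)
;                 return slot;                            /* full tag match     */
;         }
;         return -1;
;     }
;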
8063 .align 5
8064
8065 pmapCacheLookup:
8066 la r10,pmapCCtl(r3) ; Point to the segment cache control
8067
8068 pmapCacheLookuq:
8069 lwarx r11,0,r10 ; Get the segment cache control value
8070 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8071 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
8072 bne-- pmapCacheLookur ; Nope...
8073 stwcx. r0,0,r10 ; Try to take the lock
8074 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
8075
8076 isync ; Make sure we get reservation first
8077 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
8078 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
8079 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
8080 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
8081 lis r8,0x8888 ; Get some eights
8082 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
8083 ori r8,r8,0x8888 ; Fill the rest with eights
8084
8085 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
8086 eqv r9,r9,r5 ; Get 0xF where we hit in top half
8087
8088 rlwinm r2,r10,1,0,30 ; Shift over 1
8089 rlwinm r0,r9,1,0,30 ; Shift over 1
8090 and r2,r2,r10 ; AND the even/odd pair into the even
8091 and r0,r0,r9 ; AND the even/odd pair into the even
8092 rlwinm r10,r2,2,0,28 ; Shift over 2
8093 rlwinm r9,r0,2,0,28 ; Shift over 2
8094 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8095 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8096
8097 and r10,r10,r8 ; Clear out extras
8098 and r9,r9,r8 ; Clear out extras
8099
8100 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
8101 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
8102 or r10,r0,r10 ; Merge them
8103 or r9,r2,r9 ; Merge them
8104 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
8105 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
8106 or r10,r0,r10 ; Merge them
8107 or r9,r2,r9 ; Merge them
8108 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
8109 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
8110 not r6,r11 ; Turn invalid into valid
8111 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
8112
8113 la r10,pmapSegCache(r3) ; Point at the cache slots
8114 and. r6,r9,r6 ; Get mask of valid and hit
8115 li r0,0 ; Clear
8116 li r3,0 ; Assume not found
8117 oris r0,r0,0x8000 ; Start a mask
8118 beqlr++ ; Leave, should usually be no hits...
8119
8120 pclNextEnt: cntlzw r5,r6 ; Find an in use one
8121 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
8122 rlwinm r7,r5,4,0,27 ; Index to the cache entry
8123 srw r2,r0,r5 ; Get validity mask bit
8124 add r7,r7,r10 ; Point to the cache slot
8125 andc r6,r6,r2 ; Clear the validity bit we just tried
8126 bgelr-- cr1 ; Leave if there are no more to check...
8127
8128 lwz r5,sgcESID(r7) ; Get the top half
8129
8130 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
8131
8132 bne++ pclNextEnt ; Nope, try again...
8133
8134 mr r3,r7 ; Point to the slot
8135 blr ; Leave....
8136
8137 .align 5
8138
8139 pmapCacheLookur:
8140 li r11,lgKillResv ; The killing spot
8141 stwcx. r11,0,r11 ; Kill the reservation
8142
8143 pmapCacheLookus:
8144 lwz r11,pmapCCtl(r3) ; Get the segment cache control
8145 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8146 beq++ pmapCacheLookup ; Nope...
8147 b pmapCacheLookus ; Yup, keep waiting...
8148
8149
8150 ;
8151 ; mapMergeRC -- Given a physical mapping address in R31, locate its
8152 ; connected PTE (if any) and merge the PTE referenced and changed bits
8153 ; into the mapping and physent.
8154 ;
8155
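;
; The merge is conceptually an atomic OR of the PTE's R and C bits into the
; master copy kept in the physent; in C (with a compiler builtin standing in
; for the lwarx/stwcx. retry loop used below) it is simply:
;
;     #include <stdint.h>
;
;     /* Accumulate freshly gathered R/C bits without losing bits that other
;        processors may be setting concurrently.                            */
;     static void merge_rc(uint32_t *physent_rc, uint32_t pte_rc_bits) {
;         __atomic_fetch_or(physent_rc, pte_rc_bits, __ATOMIC_RELAXED);
;     }
;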
8156 .align 5
8157
8158 mapMergeRC32:
8159 lwz r0,mpPte(r31) ; Grab the PTE offset
8160 mfsdr1 r7 ; Get the pointer to the hash table
8161 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8162 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8163 andi. r3,r0,mpHValid ; Is there a possible PTE?
8164 srwi r7,r0,4 ; Convert to PCA units
8165 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8166 mflr r2 ; Save the return
8167 subfic r7,r7,-4 ; Convert to -4 based negative index
8168 add r7,r10,r7 ; Point to the PCA directly
8169 beqlr-- ; There was no PTE to start with...
8170
8171 bl mapLockPteg ; Lock the PTEG
8172
8173 lwz r0,mpPte(r31) ; Grab the PTE offset
8174 mtlr r2 ; Restore the LR
8175 andi. r3,r0,mpHValid ; Is there a possible PTE?
8176 beq- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8177
8178 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8179 add r3,r3,r10 ; Point to actual PTE
8180 lwz r5,4(r3) ; Get the real part of the PTE
8181 srwi r10,r5,12 ; Change physical address to a ppnum
8182
8183 mMNmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8184 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8185 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8186 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8187 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8188 add r11,r11,r8 ; Point to the bank table
8189 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8190 lwz r11,mrStart(r11) ; Get the start of bank
8191 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8192 addi r2,r2,4 ; Offset to last half of field
8193 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8194 sub r11,r10,r11 ; Get the index into the table
8195 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8196
8197 mMmrgRC: lwarx r10,r11,r2 ; Get the master RC
8198 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8199 or r0,r0,r10 ; Merge in the new RC
8200 stwcx. r0,r11,r2 ; Try to stick it back
8201 bne-- mMmrgRC ; Try again if we collided...
8202 eieio ; Commit all updates
8203
8204 mMPUnlock:
8205 stw r6,0(r7) ; Unlock PTEG
8206 blr ; Return
8207
8208 ;
8209 ; 64-bit version of mapMergeRC
8210 ;
8211 .align 5
8212
8213 mapMergeRC64:
8214 lwz r0,mpPte(r31) ; Grab the PTE offset
8215 ld r5,mpVAddr(r31) ; Grab the virtual address
8216 mfsdr1 r7 ; Get the pointer to the hash table
8217 rldicr r10,r7,0,45 ; Clean up the hash table base
8218 andi. r3,r0,mpHValid ; Is there a possible PTE?
8219 srdi r7,r0,5 ; Convert to PCA units
8220 rldicr r7,r7,0,61 ; Clean up PCA
8221 subfic r7,r7,-4 ; Convert to -4 based negative index
8222 mflr r2 ; Save the return
8223 add r7,r10,r7 ; Point to the PCA directly
8224 beqlr-- ; There was no PTE to start with...
8225
8226 bl mapLockPteg ; Lock the PTEG
8227
8228 lwz r0,mpPte(r31) ; Grab the PTE offset again
8229 mtlr r2 ; Restore the LR
8230 andi. r3,r0,mpHValid ; Is there a possible PTE?
8231 beq-- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8232
8233 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8234 add r3,r3,r10 ; Point to the actual PTE
8235 ld r5,8(r3) ; Get the real part
8236 srdi r10,r5,12 ; Change physical address to a ppnum
8237 b mMNmerge ; Join the common 32-64-bit code...
8238
8239
8240 ;
8241 ; This routine, given a mapping, will find and lock the PTEG
8242 ; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
8243 ; PTEG and return. In this case R4 is undefined, only the low 12 bits of mpVAddr
8244 ; are valid in R5, and R3 will contain 0.
8245 ;
8246 ; If the mapping is still valid, we will invalidate the PTE and merge
8247 ; the RC bits into the physent and also save them into the mapping.
8248 ;
8249 ; We then return with R3 pointing to the PTE slot, R4 is the
8250 ; top of the PTE and R5 is the bottom. R6 contains the PCA.
8251 ; R7 points to the PCA entry.
8252 ;
8253 ; Note that we should NEVER be called on a block or special mapping.
8254 ; We could do many bad things.
8255 ;
8256
8257 .align 5
8258
8259 mapInvPte32:
8260 lwz r0,mpPte(r31) ; Grab the PTE offset
8261 mfsdr1 r7 ; Get the pointer to the hash table
8262 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8263 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8264 andi. r3,r0,mpHValid ; Is there a possible PTE?
8265 srwi r7,r0,4 ; Convert to PCA units
8266 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8267 mflr r2 ; Save the return
8268 subfic r7,r7,-4 ; Convert to -4 based negative index
8269 add r7,r10,r7 ; Point to the PCA directly
8270 beqlr-- ; There was no PTE to start with...
8271
8272 bl mapLockPteg ; Lock the PTEG
8273
8274 lwz r0,mpPte(r31) ; Grab the PTE offset
8275 mtlr r2 ; Restore the LR
8276 andi. r3,r0,mpHValid ; Is there a possible PTE?
8277 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8278
8279 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8280 add r3,r3,r10 ; Point to actual PTE
8281 lwz r4,0(r3) ; Get the top of the PTE
8282
8283 li r8,tlbieLock ; Get the TLBIE lock
8284 rlwinm r0,r4,0,1,31 ; Clear the valid bit
8285 stw r0,0(r3) ; Invalidate the PTE
8286
8287 sync ; Make sure everyone sees the invalidate
8288
8289 mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
8290 mfsprg r2,2 ; Get feature flags
8291 mr. r0,r0 ; Is it locked?
8292 li r0,1 ; Get our lock word
8293 bne- mITLBIE32 ; It is locked, go wait...
8294
8295 stwcx. r0,0,r8 ; Try to get it
8296 bne- mITLBIE32 ; We was beat...
8297
8298 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
8299 li r0,0 ; Lock clear value
8300
8301 tlbie r5 ; Invalidate it everywhere
8302
8303 beq- mINoTS32 ; Can not have MP on this machine...
8304
8305 eieio ; Make sure that the tlbie happens first
8306 tlbsync ; Wait for everyone to catch up
8307 sync ; Make sure of it all
8308
8309 mINoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
8310 lwz r5,4(r3) ; Get the real part
8311 srwi r10,r5,12 ; Change physical address to a ppnum
8312
8313 mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8314 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8315 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8316 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8317 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8318 add r11,r11,r8 ; Point to the bank table
8319 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8320 lwz r11,mrStart(r11) ; Get the start of bank
8321 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8322 addi r2,r2,4 ; Offset to last half of field
8323 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8324 sub r11,r10,r11 ; Get the index into the table
8325 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8326
8327
8328 mImrgRC: lwarx r10,r11,r2 ; Get the master RC
8329 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8330 or r0,r0,r10 ; Merge in the new RC
8331 stwcx. r0,r11,r2 ; Try to stick it back
8332 bne-- mImrgRC ; Try again if we collided...
8333
8334 blr ; Leave with the PCA still locked up...
8335
8336 mIPUnlock: eieio ; Make sure all updates come first
8337
8338 stw r6,0(r7) ; Unlock
8339 blr
8340
8341 ;
8342 ; 64-bit version
8343 ;
8344 .align 5
8345
8346 mapInvPte64:
8347 lwz r0,mpPte(r31) ; Grab the PTE offset
8348 ld r5,mpVAddr(r31) ; Grab the virtual address
8349 mfsdr1 r7 ; Get the pointer to the hash table
8350 rldicr r10,r7,0,45 ; Clean up the hash table base
8351 andi. r3,r0,mpHValid ; Is there a possible PTE?
8352 srdi r7,r0,5 ; Convert to PCA units
8353 rldicr r7,r7,0,61 ; Clean up PCA
8354 subfic r7,r7,-4 ; Convert to -4 based negative index
8355 mflr r2 ; Save the return
8356 add r7,r10,r7 ; Point to the PCA directly
8357 beqlr-- ; There was no PTE to start with...
8358
8359 bl mapLockPteg ; Lock the PTEG
8360
8361 lwz r0,mpPte(r31) ; Grab the PTE offset again
8362 mtlr r2 ; Restore the LR
8363 andi. r3,r0,mpHValid ; Is there a possible PTE?
8364 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8365
8366 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8367 add r3,r3,r10 ; Point to the actual PTE
8368 ld r4,0(r3) ; Get the top of the PTE
8369
8370 li r8,tlbieLock ; Get the TLBIE lock
8371 rldicr r0,r4,0,62 ; Clear the valid bit
8372 std r0,0(r3) ; Invalidate the PTE
8373
8374 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
8375 sync ; Make sure everyone sees the invalidate
8376 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
8377
8378 mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
8379 mr. r0,r0 ; Is it locked?
8380 li r0,1 ; Get our lock word
8381 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
8382
8383 stwcx. r0,0,r8 ; Try to get it
8384 bne-- mITLBIE64 ; We was beat...
8385
8386 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
8387
8388 li r0,0 ; Lock clear value
8389
8390 tlbie r2 ; Invalidate it everywhere
8391
8392 eieio ; Make sure that the tlbie happens first
8393 tlbsync ; Wait for everyone to catch up
8394 ptesync ; Wait for quiet again
8395
8396 stw r0,tlbieLock(0) ; Clear the tlbie lock
8397
8398 ld r5,8(r3) ; Get the real part
8399 srdi r10,r5,12 ; Change physical address to a ppnum
8400 b mINmerge ; Join the common 32-64-bit code...
8401
8402 mITLBIE64a: li r5,lgKillResv ; Killing field
8403 stwcx. r5,0,r5 ; Kill reservation
8404
8405 mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
8406 mr. r0,r0 ; Is it locked?
8407 beq++ mITLBIE64 ; Nope, try again...
8408 b mITLBIE64b ; Yup, wait for it...
8409
8410 ;
8411 ; mapLockPteg - Locks a PTEG
8412 ; R7 points to PCA entry
8413 ; R6 contains PCA on return
8414 ;
8415 ;
8416
8417 .align 5
8418
8419 mapLockPteg:
8420 lwarx r6,0,r7 ; Pick up the PCA
8421 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8422 ori r0,r6,PCAlock ; Set the lock bit
8423 bne-- mLSkill ; It is locked...
8424
8425 stwcx. r0,0,r7 ; Try to lock the PTEG
8426 bne-- mapLockPteg ; We collided...
8427
8428 isync ; Nostradamus lied
8429 blr ; Leave...
8430
8431 mLSkill: li r6,lgKillResv ; Get killing field
8432 stwcx. r6,0,r6 ; Kill it
8433
8434 mapLockPteh:
8435 lwz r6,0(r7) ; Pick up the PCA
8436 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8437 beq++ mapLockPteg ; Nope, try again...
8438 b mapLockPteh ; Yes, wait for it...
8439
8440
8441 ;
8442 ; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
8443 ; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
8444 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
8445 ; R4 returns the slot index.
8446 ;
8447 ; CR7 also indicates that we have a block mapping
8448 ;
8449 ; The PTEG allocation controls are a bit map of the state of the PTEG.
8450 ; PCAfree indicates that the PTE slot is empty.
8451 ; PCAauto means that it comes from an autogen area. These
8452 ; guys do not keep track of reference and change and are actually "wired".
8453 ; They are easy to maintain. PCAsteal
8454 ; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
8455 ; fields fit in a single word and are loaded and stored under control of the
8456 ; PTEG control area lock (PCAlock).
8457 ;
8458 ; Note that PCAauto does not contribute to the steal calculations at all. Originally
8459 ; it did, autogens were second in priority. This can result in a pathological
8460 ; case where an instruction can not make forward progress, or one PTE slot
8461 ; thrashes.
8462 ;
8463 ; Note that the PCA must be locked when we get here.
8464 ;
8465 ; Physically, the fields are arranged:
8466 ; 0: PCAfree
8467 ; 1: PCAsteal
8468 ; 2: PCAauto
8469 ; 3: PCAmisc
8470 ;
8471 ;
8472 ; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
8473 ;
8474 ; At exit:
8475 ;
8476 ; R3 = 0 - no steal
8477 ; R3 = 1 - steal regular
8478 ; R3 = 2 - steal autogen
8479 ; R4 contains slot number
8480 ; R6 contains updated PCA image
8481 ;
8482
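;
; Stripped of the bit gymnastics, the selection policy reads like the C
; sketch below; the 8-bit fields and return codes mirror the description
; above, while the exact PCA encoding is left out (names are assumed).
;
;     #include <stdint.h>
;
;     /* Pick a PTE slot: a free slot wins, otherwise steal the slot that the
;        sliding steal mask (always exactly one bit set) points at.  Returns
;        0 = no steal, 1 = stole a regular PTE, 2 = stole an autogen PTE.   */
;     static int sel_slot(uint8_t free_bits, uint8_t steal_bit, uint8_t auto_bits,
;                         int *slot_out) {
;         uint8_t pick = free_bits ? free_bits : steal_bit;
;         int slot = 0;
;         while (!(pick & (0x80u >> slot)))
;             slot++;                                     /* leftmost candidate */
;         *slot_out = slot;
;         if (free_bits)                    return 0;     /* unoccupied slot    */
;         if (auto_bits & (0x80u >> slot))  return 2;     /* stole autogen      */
;         return 1;                                       /* stole regular      */
;     }
;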
8483 .align 5
8484
8485 mapSelSlot: lis r10,0 ; Clear autogen mask
8486 li r9,0 ; Start a mask
8487 beq cr7,mSSnotblk ; Skip if this is not a block mapping
8488 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
8489
8490 mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
8491 oris r9,r9,0x8000 ; Get a mask
8492 cntlzw r4,r6 ; Find a slot or steal one
8493 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
8494 rlwinm r4,r4,0,29,31 ; Isolate bit position
8495 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
8496 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
8497 srwi r11,r11,1 ; Slide steal mask right
8498 and r8,r6,r2 ; Isolate the old in use and autogen bits
8499 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
8500 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
8501 and r2,r2,r10 ; Keep the autogen part if autogen
8502 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
8503 or r6,r6,r2 ; Add in the new autogen bit
8504 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
8505 rlwinm r8,r8,1,31,31 ; Isolate old in use
8506 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
8507
8508 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
8509 blr ; Leave...
8510
8511 ;
8512 ; Shared/Exclusive locks
8513 ;
8514 ; A shared/exclusive lock allows multiple shares of a lock to be taken
8515 ; but only one exclusive. A shared lock can be "promoted" to exclusive
8516 ; when it is the only share. If there are multiple sharers, the lock
8517 ; must be "converted". A promotion drops the share and gains exclusive as
8518 ; an atomic operation. If anyone else has a share, the operation fails.
8519 ; A conversion first drops the share and then takes an exclusive lock.
8520 ;
8521 ; We will want to add a timeout to this eventually.
8522 ;
8523 ; R3 is set to 0 for success, non-zero for failure
8524 ;
8525
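;
; The lock word's semantics can be sketched in C as below, with compiler
; builtins standing in for the lwarx/stwcx. sequences; the spin/backoff and
; reservation-kill details handled by the assembly are omitted.
;
;     #include <stdint.h>
;     #include <stdbool.h>
;
;     /* 0 = free, 0x80000000 = held exclusive, otherwise the share count. */
;     static bool sxlk_try_shared(uint32_t *lk) {
;         uint32_t v = *lk;
;         if (v & 0x80000000u)
;             return false;                               /* exclusive holder */
;         return __atomic_compare_exchange_n(lk, &v, v + 1, false,
;                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
;     }
;
;     /* Promote: succeeds only if we hold the single outstanding share. */
;     static bool sxlk_try_promote(uint32_t *lk) {
;         uint32_t one = 1;
;         return __atomic_compare_exchange_n(lk, &one, 0x80000000u, false,
;                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
;     }
;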
8526 ;
8527 ; Convert a share into an exclusive
8528 ;
8529
8530 .align 5
8531
8532 sxlkConvert:
8533
8534 lis r0,0x8000 ; Get the locked lock image
8535 #if 0
8536 mflr r0 ; (TEST/DEBUG)
8537 oris r0,r0,0x8000 ; (TEST/DEBUG)
8538 #endif
8539
8540 sxlkCTry: lwarx r2,0,r3 ; Get the lock word
8541 cmplwi r2,1 ; Does it just have our share?
8542 subi r2,r2,1 ; Drop our share in case we do not get it
8543 bne-- sxlkCnotfree ; No, we need to unlock...
8544 stwcx. r0,0,r3 ; Try to take it exclusively
8545 bne-- sxlkCTry ; Collision, try again...
8546
8547 isync
8548 li r3,0 ; Set RC
8549 blr ; Leave...
8550
8551 sxlkCnotfree:
8552 stwcx. r2,0,r3 ; Try to drop our share...
8553 bne-- sxlkCTry ; Try again if we collided...
8554 b sxlkExclusive ; Go take it exclusively...
8555
8556 ;
8557 ; Promote shared to exclusive
8558 ;
8559
8560 .align 5
8561
8562 sxlkPromote:
8563 lis r0,0x8000 ; Get the locked lock image
8564 #if 0
8565 mflr r0 ; (TEST/DEBUG)
8566 oris r0,r0,0x8000 ; (TEST/DEBUG)
8567 #endif
8568
8569 sxlkPTry: lwarx r2,0,r3 ; Get the lock word
8570 cmplwi r2,1 ; Does it just have our share?
8571 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
8572 stwcx. r0,0,r3 ; Try to take it exclusively
8573 bne-- sxlkPTry ; Collision, try again...
8574
8575 isync
8576 li r3,0 ; Set RC
8577 blr ; Leave...
8578
8579 sxlkPkill: li r2,lgKillResv ; Point to killing field
8580 stwcx. r2,0,r2 ; Kill reservation
8581 blr ; Leave
8582
8583
8584
8585 ;
8586 ; Take lock exclusively
8587 ;
8588
8589 .align 5
8590
8591 sxlkExclusive:
8592 lis r0,0x8000 ; Get the locked lock image
8593 #if 0
8594 mflr r0 ; (TEST/DEBUG)
8595 oris r0,r0,0x8000 ; (TEST/DEBUG)
8596 #endif
8597
8598 sxlkXTry: lwarx r2,0,r3 ; Get the lock word
8599 mr. r2,r2 ; Is it locked?
8600 bne-- sxlkXWait ; Yes...
8601 stwcx. r0,0,r3 ; Try to take it
8602 bne-- sxlkXTry ; Collision, try again...
8603
8604 isync ; Toss anything younger than us
8605 li r3,0 ; Set RC
8606 blr ; Leave...
8607
8608 .align 5
8609
8610 sxlkXWait: li r2,lgKillResv ; Point to killing field
8611 stwcx. r2,0,r2 ; Kill reservation
8612
8613 sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
8614 mr. r2,r2 ; Is it free yet?
8615 beq++ sxlkXTry ; Yup...
8616 b sxlkXWaiu ; Hang around a bit more...
8617
8618 ;
8619 ; Take a share of the lock
8620 ;
8621
8622 .align 5
8623
8624 sxlkShared: lwarx r2,0,r3 ; Get the lock word
8625 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8626 addi r2,r2,1 ; Up the share count
8627 bne-- sxlkSWait ; Yes...
8628 stwcx. r2,0,r3 ; Try to take it
8629 bne-- sxlkShared ; Collision, try again...
8630
8631 isync ; Toss anything younger than us
8632 li r3,0 ; Set RC
8633 blr ; Leave...
8634
8635 .align 5
8636
8637 sxlkSWait: li r2,lgKillResv ; Point to killing field
8638 stwcx. r2,0,r2 ; Kill reservation
8639
8640 sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
8641 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8642 beq++ sxlkShared ; Nope...
8643 b sxlkSWaiu ; Hang around a bit more...
8644
8645 ;
8646 ; Unlock either exclusive or shared.
8647 ;
8648
8649 .align 5
8650
8651 sxlkUnlock: eieio ; Make sure we order our stores out
8652
8653 sxlkUnTry: lwarx r2,0,r3 ; Get the lock
8654 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
8655 subi r2,r2,1 ; Remove our share if we have one
8656 li r0,0 ; Clear this
8657 bne-- sxlkUExclu ; We hold exclusive...
8658
8659 stwcx. r2,0,r3 ; Try to lose our share
8660 bne-- sxlkUnTry ; Collision...
8661 blr ; Leave...
8662
8663 sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
8664 beqlr++ ; Leave if ok...
8665 b sxlkUnTry ; Could not store, try over...
8666
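;
; fillPage - fill a physical page with a 32-bit pattern
;
; R3 = physical page number (shifted left 12 to form the physical address)
; R4 = 32-bit fill pattern (replicated through the 64-bit register on 64-bit parts)
;
; Interrupts and translation are turned off (and FP/vector masked) while the
; page is filled a cache line at a time, using dcbz/dcbz128 so the lines do not
; have to be fetched first. Conceptually (hedged C sketch; the fill_page name
; and prototype are illustrative only):
;
;     #include <stdint.h>
;     #include <stddef.h>
;
;     void fill_page(uint32_t *page, uint32_t pattern)   /* page: 4096 bytes, aligned */
;     {
;         for (size_t i = 0; i < 4096 / sizeof(uint32_t); i++)
;             page[i] = pattern;                          /* same result, without the  */
;     }                                                   /* cache-line tricks         */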
8667
8668 .align 5
8669 .globl EXT(fillPage)
8670
8671 LEXT(fillPage)
8672
8673 mfsprg r0,2 ; Get feature flags
8674 mtcrf 0x02,r0 ; move pf64Bit to cr
8675
8676 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
8677 lis r2,0x0200 ; Get vec
8678 mr r6,r4 ; Copy
8679 ori r2,r2,0x2000 ; Get FP
8680 mr r7,r4 ; Copy
8681 mfmsr r5 ; Get MSR
8682 mr r8,r4 ; Copy
8683 andc r5,r5,r2 ; Clear out permanent turn-offs
8684 mr r9,r4 ; Copy
8685 ori r2,r2,0x8030 ; Clear IR, DR and EE
8686 mr r10,r4 ; Copy
8687 andc r0,r5,r2 ; Kill them
8688 mr r11,r4 ; Copy
8689 mr r12,r4 ; Copy
8690 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
8691
8692 slwi r3,r3,12 ; Make into a physical address
8693 mtmsr r2 ; Interrupts and translation off
8694 isync
8695
8696 li r2,4096/32 ; Get number of cache lines
8697
8698 fp32again: dcbz 0,r3 ; Clear
8699 addic. r2,r2,-1 ; Count down
8700 stw r4,0(r3) ; Fill
8701 stw r6,4(r3) ; Fill
8702 stw r7,8(r3) ; Fill
8703 stw r8,12(r3) ; Fill
8704 stw r9,16(r3) ; Fill
8705 stw r10,20(r3) ; Fill
8706 stw r11,24(r3) ; Fill
8707 stw r12,28(r3) ; Fill
8708 addi r3,r3,32 ; Point next
8709 bgt+ fp32again ; Keep going
8710
8711 mtmsr r5 ; Restore all
8712 isync
8713 blr ; Return...
8714
8715 .align 5
8716
8717 fpSF1: li r2,1
8718 sldi r2,r2,63 ; Get 64-bit bit
8719 or r0,r0,r2 ; Turn on 64-bit
8720 sldi r3,r3,12 ; Make into a physical address
8721
8722 mtmsrd r0 ; Interrupts and translation off
8723 isync
8724
8725 li r2,4096/128 ; Get number of cache lines
8726
8727 fp64again: dcbz128 0,r3 ; Clear
8728 addic. r2,r2,-1 ; Count down
8729 std r4,0(r3) ; Fill
8730 std r6,8(r3) ; Fill
8731 std r7,16(r3) ; Fill
8732 std r8,24(r3) ; Fill
8733 std r9,32(r3) ; Fill
8734 std r10,40(r3) ; Fill
8735 std r11,48(r3) ; Fill
8736 std r12,56(r3) ; Fill
8737 std r4,64+0(r3) ; Fill
8738 std r6,64+8(r3) ; Fill
8739 std r7,64+16(r3) ; Fill
8740 std r8,64+24(r3) ; Fill
8741 std r9,64+32(r3) ; Fill
8742 std r10,64+40(r3) ; Fill
8743 std r11,64+48(r3) ; Fill
8744 std r12,64+56(r3) ; Fill
8745 addi r3,r3,128 ; Point next
8746 bgt+ fp64again ; Keep going
8747
8748 mtmsrd r5 ; Restore all
8749 isync
8750 blr ; Return...
8751
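;
; mapLog - debug trace: appends a 16-byte record (R4 stored twice, then R5 and
; R6) to the buffer addressed by EXT(mapdebug) (or by R3 the first time, while
; mapdebug is still zero), with data translation off, then advances and saves
; the buffer pointer.
;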
8752 .align 5
8753 .globl EXT(mapLog)
8754
8755 LEXT(mapLog)
8756
8757 mfmsr r12 ; Save the current MSR
8758 lis r11,hi16(EXT(mapdebug)) ; Get the high half of the buffer pointer address
8759 ori r11,r11,lo16(EXT(mapdebug)) ; Get the low half of the buffer pointer address
8760 lwz r10,0(r11) ; Get the current buffer position
8761 mr. r10,r10 ; Has it been set up yet?
8762 bne++ mLxx ; Yes, use it...
8763 mr r10,r3 ; No, start logging at the caller-supplied address
8764 mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Turn off data translation
8765 mtmsr r0 ; Set the new MSR
8766 isync
8767 stw r4,0(r10) ; Save the first log word
8768 stw r4,4(r10) ; Save the first log word again
8769 stw r5,8(r10) ; Save the second log word
8770 stw r6,12(r10) ; Save the third log word
8771 mtmsr r12 ; Put the MSR back the way it was
8772 isync
8773 addi r10,r10,16 ; Step to the next record
8774 stw r10,0(r11) ; Save the updated buffer position
8775 blr ; Return...
8776
8777 #if 1
8778 .align 5
8779 .globl EXT(checkBogus)
8780
8781 LEXT(checkBogus)
8782
8783 BREAKPOINT_TRAP
8784 blr ; No-op normally
8785
8786 #endif
8787
8788
8789
8790