1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #include <assym.s>
23 #include <debug.h>
24 #include <cpus.h>
25 #include <db_machine_commands.h>
26 #include <mach_rt.h>
27
28 #include <mach_debug.h>
29 #include <ppc/asm.h>
30 #include <ppc/proc_reg.h>
31 #include <ppc/exception.h>
32 #include <ppc/Performance.h>
33 #include <ppc/exception.h>
34 #include <ppc/pmap_internals.h>
35 #include <mach/ppc/vm_param.h>
36 #define PERFTIMES 0
37
38 .text
39
40 /*
41 *
42 * Random notes and musings...
43 *
44 * Access to mappings via the PTEG hash must be done with the list locked.
45 * Access via the physical entries is controlled by the physent lock.
46 * Access to mappings is controlled by the PTEG lock once they are queued.
47 * If they are not on the list, they don't really exist, so
48 * only one processor at a time can find them, and no access control is needed.
49 *
50 * The second half of the PTE is kept in the physical entry. It is done this
51 * way because there may be multiple mappings that refer to the same physical
52 * page (i.e., address aliases or synonyms). We must do it this way, because
53 * maintenance of the reference and change bits becomes nightmarish if each mapping
54 * has its own. One side effect of this, and not necessarily a bad one, is that
55 * all mappings for a single page share a single WIMG, protection state, and RC bits.
56 * The only "bad" thing is the reference bit. With a single copy, we cannot get
57 * a completely accurate working set calculation, i.e., we can't tell which mapping was
58 * used to reference the page; all we can tell is that the physical page was
59 * referenced.
60 *
61 * The master copies of the reference and change bits are kept in the phys_entry.
62 * Other than the reference and change bits, changes to the phys_entry are not
63 * allowed if it has any mappings. The master reference and change bits must be
64 * changed via atomic update.
65 *
66 * Invalidating a PTE merges the RC bits into the phys_entry.
67 *
68 * Before checking the reference and/or change bits, ALL mappings to the physical page are
69 * invalidated.
70 *
71 * PTEs are never explicitly validated, they are always faulted in. They are also
72 * not visible outside of the hw_vm modules. Complete separation of church and state.
73 *
74 * Removal of a mapping invalidates its PTE.
75 *
76 * So, how do we deal with mappings to I/O space? We don't have a physent for it.
77 * Within the mapping is a copy of the second half of the PTE. This is used
78 * ONLY when there is no physical entry. It is swapped into the PTE whenever
79 * it is built. There is no need to swap it back out, because RC is not
80 * maintained for these mappings.
81 *
82 * So, I'm starting to get concerned about the number of lwarx/stwcx. loops in
83 * this. Satisfying a mapped address with no stealing requires one lock. If we
84 * steal an entry, there are two locks and an atomic update. Invalidation of an entry
85 * takes one lock and, if there is a PTE, another lock and an atomic update. Other
86 * operations are multiples (per mapping) of the above. Maybe we should look for
87 * an alternative. So far, I haven't found one, but I haven't looked hard.
88 */
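/*
 * For flavor, the interlocked RC merge that shows up all over this file is
 * roughly the following C (a sketch only; load_reserved and store_conditional
 * are made-up stand-ins for the lwarx/stwcx. pairs used below):
 *
 *	void merge_rc(volatile unsigned int *pepte1, unsigned int rcbits) {
 *		unsigned int old;
 *		do {
 *			old = load_reserved(pepte1);		// lwarx - load and reserve
 *		} while (!store_conditional(pepte1, old | rcbits)); // stwcx. - fails if we lost the reservation
 *	}
 */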
89
90
91 /* hw_add_map(struct mapping *mp, space_t space, vm_offset_t va) - Adds a mapping
92 *
93 * Adds a mapping to the PTEG hash list.
94 *
95 * Interrupts must be disabled before calling.
96 *
97 * Using the space and the virtual address, we hash into the hash table
98 * and get a lock on the PTEG hash chain. Then we chain the
99 * mapping to the front of the list.
100 *
101 */
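/*
 * The hash below is the standard PowerPC primary PTEG hash. Roughly, in C
 * (a sketch; the mask comes from SDR1 via the register work that follows,
 * and the exact VSID formation is approximated here):
 *
 *	unsigned int vsid = ((va >> 28) << 20) | (space & 0xFFFFF);	// segment no. + space
 *	unsigned int pidx = (va >> 12) & 0xFFFF;			// EA bits 4..19
 *	unsigned int hash = (vsid ^ pidx) & htabmask;			// primary hash, wrapped
 *	pca = hash_table_end + (hash << 6);				// our PCA entry
 */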
102
103 .align 5
104 .globl EXT(hw_add_map)
105
106 LEXT(hw_add_map)
107
108 #if PERFTIMES && DEBUG
109 mr r7,r3
110 mflr r11
111 li r3,20
112 bl EXT(dbgLog2) ; Start of hw_add_map
113 mr r3,r7
114 mtlr r11
115 #endif
116
117 mfmsr r0 /* Get the MSR */
118 eqv r6,r6,r6 /* Fill the bottom with foxes */
119 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
120 rlwinm r11,r4,6,6,25 /* Position the space for the VSID */
121 mfspr r10,sdr1 /* Get hash table base and size */
122 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
123 rlwimi r11,r5,30,2,5 /* Insert the segment no. to make a VSID */
124 mfsprg r12,2 ; Get feature flags
125 rlwimi r6,r10,16,0,15 /* Make table size -1 out of mask */
126 rlwinm r7,r5,26,10,25 /* Isolate the page index */
127 or r8,r10,r6 /* Point to the last byte in table */
128 rlwinm r9,r5,4,0,3 ; Move nybble 1 up to 0
129 xor r7,r7,r11 /* Get primary hash */
130 mtcrf 0x04,r12 ; Set the features
131 andi. r12,r0,0x7FCF /* Disable translation and interruptions */
132 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
133 addi r8,r8,1 /* Point to the PTEG Control Area */
134 xor r9,r9,r5 ; Splooch vaddr nybble 0 and 1 together
135 and r7,r7,r6 /* Wrap the hash */
136 rlwimi r11,r5,10,26,31 /* Move API into pte ID */
137 rlwinm r9,r9,6,27,29 ; Get splooched bits in place
138 add r8,r8,r7 /* Point to our PCA entry */
139 rlwinm r10,r4,2,27,29 ; Get low 3 bits of the VSID for look-aside hash
140
141 bt pfNoMSRirb,hamNoMSR ; No MSR...
142
143 mtmsr r12 ; Translation and all off
144 isync ; Toss prefetch
145 b hamNoMSRx
146
147 hamNoMSR: mr r4,r0 ; Save R0
148 mr r2,r3 ; Save
149 li r0,loadMSR ; Get the MSR setter SC
150 mr r3,r12 ; Get new MSR
151 sc ; Set it
152 mr r0,r4 ; Restore
153 mr r3,r2 ; Restore
154 hamNoMSRx:
155
156 la r4,PCAhash(r8) /* Point to the mapping hash area */
157 xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID
158 isync /* Get rid of anything prefetched before we ref storage */
159 /*
160 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
161 * and word 0 of the PTE (the virtual part).
162 *
163 * Now, we just lock the PCA.
164 */
165
166 li r12,1 /* Get the locked value */
167 dcbt 0,r4 /* We'll need the hash area in a sec, so get it */
168 add r4,r4,r9 /* Point to the right mapping hash slot */
169
170 ptegLckx: lwarx r10,0,r8 /* Get the PTEG lock */
171 mr. r10,r10 /* Is it locked? */
172 bne- ptegLckwx /* Yeah... */
173 stwcx. r12,0,r8 /* Try to take it */
174 bne- ptegLckx /* Someone else was trying, try again... */
175 b ptegSXgx /* All done... */
176
177 .align 4
178
179 ptegLckwx: mr. r10,r10 /* Check if it's already held */
180 beq+ ptegLckx /* It's clear... */
181 lwz r10,0(r8) /* Get lock word again... */
182 b ptegLckwx /* Wait... */
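/*
 * The lock dance above, roughly in C (a sketch; pteg_lock is the word at the
 * front of the PCA and try_lock stands in for the lwarx/stwcx. pair):
 *
 *	while (!try_lock(pteg_lock)) {		// reservation-based attempt
 *		while (*pteg_lock != 0)		// then spin on plain loads so we
 *			;			// don't hammer the reservation
 *	}
 */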
183
184 .align 4
185
186 ptegSXgx: isync /* Make sure we haven't used anything yet */
187
188 lwz r7,0(r4) /* Pick up the anchor of hash list */
189 stw r3,0(r4) /* Save the new head */
190 stw r7,mmhashnext(r3) /* Chain in the old head */
191
192 stw r4,mmPTEhash(r3) /* Point to the head of the hash list */
193
194 sync /* Make sure the chain is updated */
195 stw r10,0(r8) /* Unlock the hash list */
196 mtmsr r0 /* Restore translation and interruptions */
197 isync /* Toss anything done with DAT off */
198 #if PERFTIMES && DEBUG
199 mflr r11
200 mr r4,r3
201 li r3,21
202 bl EXT(dbgLog2) ; end of hw_add_map
203 mr r3,r4
204 mtlr r11
205 #endif
206 blr /* Leave... */
207
208
209 /* mp=hw_lock_phys_vir(space, va) - Finds and locks a physical entry by vaddr.
210 *
211 * Returns the mapping with the associated physent locked if found, or a
212 * zero and no lock if not. If we timed out trying to get the lock on
213 * the physical entry, we return a 1. A physical entry can never be on an
214 * odd boundary, so we can distinguish between a mapping and a timeout code.
215 *
216 * Interrupts must be disabled before calling.
217 *
218 * Using the space and the virtual address, we hash into the hash table
219 * and get a lock on the PTEG hash chain. Then we search the chain for the
220 * mapping for our virtual address. From there, we extract the pointer to
221 * the physical entry.
222 *
223 * Next comes a bit of monkey business. We need to get a lock on the physical
224 * entry. But, according to our rules, we can't get it after we've gotten the
225 * PTEG hash lock, we could deadlock if we do. So, we need to release the
226 * hash lock. The problem is, though, that as soon as we release it, some
227 * other yahoo may remove our mapping between the time that we release the
228 * hash lock and obtain the phys entry lock. So, we can't count on the
229 * mapping once we release the lock. Instead, after we lock the phys entry,
230 * we search the mapping list (phys_link) for our translation. If we don't find it,
231 * we unlock the phys entry, bail out, and return a 0 for the mapping address. If we
232 * did find it, we keep the lock and return the address of the mapping block.
233 *
234 * What happens when a mapping is found, but there is no physical entry?
235 * This is what happens when an I/O area is mapped. If one of these mappings
236 * is found, the mapping is returned, as is usual for this call, but we don't
237 * try to lock anything. There could possibly be some problems here if another
238 * processor releases the mapping while we are still using it. Hope this
239 * ain't gonna happen.
240 *
241 * Taaa-dahhh! Easy as pie, huh?
242 *
243 * So, we have a few hacks for running with translation off in here.
244 * First, when we call the lock routine, we have carnal knowledge of the registers it uses.
245 * That way, we don't need a stack frame, which we can't have 'cause the stack is in
246 * virtual storage. But wait, as if that's not enough... We need one more register. So,
247 * we cram the LR into the CTR and return from there.
248 *
249 */
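/*
 * The unlock/relock/revalidate dance, as a C sketch (names are stand-ins;
 * the real search keys are mmPTEv and the saved mmPTEhash head):
 *
 *	mp = search_hash_chain(space, va);	// under the PTEG lock
 *	pp = mp->physent;
 *	unlock_pteg();				// rule: never take the physent lock while holding this
 *	if (!hw_lock_bit(&pp->phys_link, PHYS_LOCK, LockTimeOut))
 *		return 1;			// timed out
 *	for (m = pp->phys_link; m; m = m->next)	// revalidate: did our mapping survive?
 *		if (m->PTEv == pte_id && m->PTEhash == saved_hash)
 *			return m;		// found it; physent stays locked
 *	hw_unlock_bit(&pp->phys_link, PHYS_LOCK);
 *	return 0;				// it was yanked while we were unlocked
 */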
250 .align 5
251 .globl EXT(hw_lock_phys_vir)
252
253 LEXT(hw_lock_phys_vir)
254
255 #if PERFTIMES && DEBUG
256 mflr r11
257 mr r5,r3
258 li r3,22
259 bl EXT(dbgLog2) ; Start of hw_lock_phys_vir
260 mr r3,r5
261 mtlr r11
262 #endif
263 mfmsr r12 /* Get the MSR */
264 eqv r6,r6,r6 /* Fill the bottom with foxes */
265 mfsprg r9,2 ; Get feature flags
266 rlwinm r11,r3,6,6,25 /* Position the space for the VSID */
267 rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
268 mfspr r5,sdr1 /* Get hash table base and size */
269 rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */
270 mtcrf 0x04,r9 ; Set the features
271 rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
272 rlwimi r6,r5,16,0,15 /* Make table size -1 out of mask */
273 andi. r0,r12,0x7FCF /* Disable translation and interruptions */
274 rlwinm r9,r4,4,0,3 ; Move nybble 1 up to 0
275 rlwinm r7,r4,26,10,25 /* Isolate the page index */
276 or r8,r5,r6 /* Point to the last byte in table */
277 xor r7,r7,r11 /* Get primary hash */
278 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
279 addi r8,r8,1 /* Point to the PTEG Control Area */
280 xor r9,r9,r4 ; Splooch vaddr nybble 0 and 1 together
281 and r7,r7,r6 /* Wrap the hash */
282 rlwimi r11,r4,10,26,31 /* Move API into pte ID */
283 rlwinm r9,r9,6,27,29 ; Get splooched bits in place
284 add r8,r8,r7 /* Point to our PCA entry */
285 rlwinm r10,r3,2,27,29 ; Get low 3 bits of the VSID for look-aside hash
286
287 bt pfNoMSRirb,hlpNoMSR ; No MSR...
288
289 mtmsr r0 ; Translation and all off
290 isync ; Toss prefetch
291 b hlpNoMSRx
292
293 hlpNoMSR: mr r3,r0 ; Get the new MSR
294 li r0,loadMSR ; Get the MSR setter SC
295 sc ; Set it
296 hlpNoMSRx:
297
298 la r3,PCAhash(r8) /* Point to the mapping hash area */
299 xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID
300 isync /* Make sure translation is off before we ref storage */
301
302 /*
303 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
304 * and word 0 of the PTE (the virtual part).
305 *
306 * Now, we just lock the PCA and find our mapping, if it exists.
307 */
308
309 dcbt 0,r3 /* We'll need the hash area in a sec, so get it */
310 add r3,r3,r9 /* Point to the right mapping hash slot */
311
312 ptegLcka: lwarx r10,0,r8 /* Get the PTEG lock */
313 li r5,1 /* Get the locked value */
314 mr. r10,r10 /* Is it locked? */
315 bne- ptegLckwa /* Yeah... */
316 stwcx. r5,0,r8 /* Try to take it */
317 bne- ptegLcka /* Someone else was trying, try again... */
318 b ptegSXga /* All done... */
319
320 .align 4
321
322 ptegLckwa: mr. r10,r10 /* Check if it's already held */
323 beq+ ptegLcka /* It's clear... */
324 lwz r10,0(r8) /* Get lock word again... */
325 b ptegLckwa /* Wait... */
326
327 .align 4
328
329 ptegSXga: isync /* Make sure we haven't used anything yet */
330
331 mflr r0 /* Get the LR */
332 lwz r9,0(r3) /* Pick up the first mapping block */
333 mtctr r0 /* Stuff it into the CTR */
334
335 findmapa:
336
337 mr. r3,r9 /* Did we hit the end? */
338 bne+ chkmapa /* Nope... */
339
340 stw r3,0(r8) /* Unlock the PTEG lock
341 Note: we never saved anything while we
342 had the lock, so we don't need a sync
343 before we unlock it */
344
345 vbail: mtmsr r12 /* Restore translation and interruptions */
346 isync /* Make sure translation is cool */
347 #if PERFTIMES && DEBUG
348 mflr r11
349 mr r4,r3
350 li r3,23
351 bl EXT(dbgLog2) ; End of hw_lock_phys_vir
352 mr r3,r4
353 mtlr r11
354 #endif
355 bctr /* Return in abject failure... */
356
357 .align 4
358
359 chkmapa: lwz r10,mmPTEv(r3) /* Pick up our virtual ID */
360 lwz r9,mmhashnext(r3) /* Pick up next mapping block */
361 cmplw r10,r11 /* Have we found ourself? */
362 bne- findmapa /* Nope, still wandering... */
363
364 lwz r9,mmphysent(r3) /* Get our physical entry pointer */
365 li r5,0 /* Clear this out */
366 mr. r9,r9 /* Is there, like, a physical entry? */
367 stw r5,0(r8) /* Unlock the PTEG lock
368 Note: we never saved anything while we
369 had the lock, so we don't need a sync
370 before we unlock it */
371
372 beq- vbail /* If there is no physical entry, it's time
373 to leave... */
374
375 /* Here we want to call hw_lock_bit. We don't want to use the stack, 'cause it's
376 * in virtual storage, and we're in real. So, we've carefully looked at the code
377 * in hw_lock_bit (and unlock) and cleverly don't use any of the registers that it uses.
378 * Be very, very aware of how you change this code. By the way, it uses:
379 * R0, R6, R7, R8, and R9. R3, R4, and R5 contain parameters
380 * Unfortunately, we need to stash R9 still. So... Since we know we will not be interrupted
381 * ('cause we turned off interruptions and translation is off) we will use SPRG3...
382 */
383
384 lwz r10,mmPTEhash(r3) /* Save the head of the hash-alike chain. We need it to find ourselves later */
385 lis r5,HIGH_ADDR(EXT(LockTimeOut)) /* Get address of timeout value */
386 la r3,pephyslink(r9) /* Point to the lock word */
387 ori r5,r5,LOW_ADDR(EXT(LockTimeOut)) /* Get second half of address */
388 li r4,PHYS_LOCK /* Get the lock bit value */
389 lwz r5,0(r5) /* Pick up the timeout value */
390 mtsprg 3,r9 /* Save R9 in SPRG3 */
391
392 bl EXT(hw_lock_bit) /* Go do the lock */
393
394 mfsprg r9,3 /* Restore pointer to the phys_entry */
395 mr. r3,r3 /* Did we timeout? */
396 lwz r4,pephyslink(r9) /* Pick up first mapping block */
397 beq- penterr /* Bad deal, we timed out... */
398
399 rlwinm r4,r4,0,0,26 ; Clear out the flags from first link
400
401 findmapb: mr. r3,r4 /* Did we hit the end? */
402 bne+ chkmapb /* Nope... */
403
404 la r3,pephyslink(r9) /* Point to where the lock is */
405 li r4,PHYS_LOCK /* Get the lock bit value */
406 bl EXT(hw_unlock_bit) /* Go unlock the physentry */
407
408 li r3,0 /* Say we failed */
409 b vbail /* Return in abject failure... */
410
411 penterr: li r3,1 /* Set timeout */
412 b vbail /* Return in abject failure... */
413
414 .align 5
415
416 chkmapb: lwz r6,mmPTEv(r3) /* Pick up our virtual ID */
417 lwz r4,mmnext(r3) /* Pick up next mapping block */
418 cmplw r6,r11 /* Have we found ourself? */
419 lwz r5,mmPTEhash(r3) /* Get the start of our hash chain */
420 bne- findmapb /* Nope, still wandering... */
421 cmplw r5,r10 /* On the same hash chain? */
422 bne- findmapb /* Nope, keep looking... */
423
424 b vbail /* Return in glorious triumph... */
425
426
427 /*
428 * hw_rem_map(mapping) - remove a mapping from the system.
429 *
430 * Upon entry, R3 contains a pointer to a mapping block and the associated
431 * physical entry is locked if there is one.
432 *
433 * If the mapping entry indicates that there is a PTE entry, we invalidate
434 * it and merge the reference and change information into the phys_entry.
435 *
436 * Next, we remove the mapping from the phys_entry and the PTEG hash list.
437 *
438 * Unlock any locks that are left, and exit.
439 *
440 * Note that this must be done with both interruptions off and VM off
441 *
442 * Note that this code depends upon the VSID being of the format 00SXXXXX
443 * where S is the segment number.
444 *
445 *
446 */
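/*
 * The PTE invalidation sequence used here (and echoed in the routines below)
 * is, as a C-with-asm sketch (lock/unlock stand in for the lwarx/stwcx. loop
 * on tlb_system_lock):
 *
 *	pte[0] &= 0x7FFFFFFF;			// clear V - PTE is now invalid
 *	__asm__ volatile("sync");		// be sure the invalid is stored
 *	lock(&tlb_system_lock);			// architecture: one tlbie at a time
 *	__asm__ volatile("tlbie %0" : : "r" (ea));	// toss it from every TLB
 *	if (!is_603)				// 603 has no tlbsync
 *		__asm__ volatile("eieio; tlbsync; isync");
 *	unlock(&tlb_system_lock);
 */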
447
448 .align 5
449 .globl EXT(hw_rem_map)
450
451 LEXT(hw_rem_map)
452 #if PERFTIMES && DEBUG
453 mflr r11
454 mr r4,r3
455 li r3,24
456 bl EXT(dbgLog2) ; Start of hw_rem_map
457 mr r3,r4
458 mtlr r11
459 #endif
460 mfsprg r9,2 ; Get feature flags
461 mfmsr r0 /* Save the MSR */
462 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
463 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
464 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
465 mtcrf 0x04,r9 ; Set the features
466 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
467
468 bt pfNoMSRirb,lmvNoMSR ; No MSR...
469
470 mtmsr r12 ; Translation and all off
471 isync ; Toss prefetch
472 b lmvNoMSRx
473
474 lmvNoMSR:
475 mr r6,r0
476 mr r4,r3
477 li r0,loadMSR ; Get the MSR setter SC
478 mr r3,r12 ; Get new MSR
479 sc ; Set it
480 mr r3,r4
481 mr r0,r6
482
483 lmvNoMSRx:
484
485
486 lwz r6,mmPTEhash(r3) /* Get pointer to hash list anchor */
487 lwz r5,mmPTEv(r3) /* Get the VSID */
488 dcbt 0,r6 /* We'll need that chain in a bit */
489
490 rlwinm r7,r6,0,0,25 /* Round hash list down to PCA boundary */
491 li r12,1 /* Get the locked value */
492 subi r6,r6,mmhashnext /* Make the anchor look like an entry */
493
494 ptegLck1: lwarx r10,0,r7 /* Get the PTEG lock */
495 mr. r10,r10 /* Is it locked? */
496 bne- ptegLckw1 /* Yeah... */
497 stwcx. r12,0,r7 /* Try to take it */
498 bne- ptegLck1 /* Someone else was trying, try again... */
499 b ptegSXg1 /* All done... */
500
501 .align 4
502
503 ptegLckw1: mr. r10,r10 /* Check if it's already held */
504 beq+ ptegLck1 /* It's clear... */
505 lwz r10,0(r7) /* Get lock word again... */
506 b ptegLckw1 /* Wait... */
507
508 .align 4
509
510 ptegSXg1: isync /* Make sure we haven't used anything yet */
511
512 lwz r12,mmhashnext(r3) /* Prime with our forward pointer */
513 lwz r4,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */
514
515 srchmaps: mr. r10,r6 /* Save the previous entry */
516 bne+ mapok /* No error... */
517
518 lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
519 ori r0,r0,LOW_ADDR(Choke)
520 sc /* Firmware Heimlich maneuver */
521
522 .align 4
523
524 mapok: lwz r6,mmhashnext(r6) /* Look at the next one */
525 cmplwi cr5,r4,0 /* Is there a PTE? */
526 cmplw r6,r3 /* Have we found ourselves? */
527 bne+ srchmaps /* Nope, get your head together... */
528
529 stw r12,mmhashnext(r10) /* Remove us from the queue */
530 rlwinm r9,r5,1,0,3 /* Move in the segment */
531 rlwinm r8,r4,6,4,19 /* Line PTEG disp up to a page */
532 rlwinm r11,r5,5,4,19 /* Line up the VSID */
533 lwz r10,mmphysent(r3) /* Point to the physical entry */
534
535 beq+ cr5,nopte /* There's no PTE to invalidate... */
536
537 xor r8,r8,r11 /* Back hash to virt index */
538 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
539 rlwimi r9,r5,22,4,9 /* Move in the API */
540 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
541 mfspr r11,pvr /* Find out what kind of machine we are */
542 rlwimi r9,r8,0,10,19 /* Create the virtual address */
543 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
544
545 stw r5,0(r4) /* Make the PTE invalid */
546
547 cmplwi cr1,r11,3 /* Is this a 603? */
548 sync /* Make sure the invalid is stored */
549
550 tlbhang1: lwarx r5,0,r12 /* Get the TLBIE lock */
551 rlwinm r11,r4,29,29,31 /* Get the bit position of entry */
552 mr. r5,r5 /* Is it locked? */
553 lis r6,0x8000 /* Start up a bit mask */
554 li r5,1 /* Get our lock word */
555 bne- tlbhang1 /* It's locked, go wait... */
556 stwcx. r5,0,r12 /* Try to get it */
557 bne- tlbhang1 /* We was beat... */
558
559 srw r6,r6,r11 /* Make a "free slot" mask */
560 lwz r5,PCAallo(r7) /* Get the allocation control bits */
561 rlwinm r11,r6,24,8,15 /* Make the autogen bit to turn off */
562 or r5,r5,r6 /* turn on the free bit */
563 rlwimi r11,r11,24,16,23 /* Get lock bit mask to turn it off */
564
565 andc r5,r5,r11 /* Turn off the lock and autogen bits in allocation flags */
566 li r11,0 /* Lock clear value */
567
568 tlbie r9 /* Invalidate it everywhere */
569
570
571 beq- cr1,its603a /* It's a 603, skip the tlbsync... */
572
573 eieio /* Make sure that the tlbie happens first */
574 tlbsync /* wait for everyone to catch up */
575 isync
576
577 its603a: sync /* Make sure of it all */
578 stw r11,0(r12) /* Clear the tlbie lock */
579 eieio /* Make sure those RC bits are loaded */
580 stw r5,PCAallo(r7) /* Show that the slot is free */
581 stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */
582
583 nopte: mr. r10,r10 /* See if there is a physical entry */
584 la r9,pephyslink(r10) /* Point to the physical mapping chain */
585 beq- nophys /* No physical entry, we're done... */
586 beq- cr5,nadamrg /* No PTE to merge... */
587
588 lwz r6,4(r4) /* Get the latest reference and change bits */
589 la r12,pepte1(r10) /* Point right at the master copy */
590 rlwinm r6,r6,0,23,24 /* Extract just the RC bits */
591
592 mrgrc: lwarx r8,0,r12 /* Get the master copy */
593 or r8,r8,r6 /* Merge in latest RC */
594 stwcx. r8,0,r12 /* Save it back */
595 bne- mrgrc /* If it changed, try again... */
596
597 nadamrg: li r11,0 /* Clear this out */
598 lwz r12,mmnext(r3) /* Prime with our next */
599
600 sync ; Make sure all is saved
601
602 stw r11,0(r7) /* Unlock the hash chain now so we don't
603 lock out another processor during
604 our next little search */
605
606 srchpmap: mr. r10,r9 /* Save the previous entry */
607 bne+ mapok1 /* No error... */
608
609 lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
610 ori r0,r0,LOW_ADDR(Choke)
611 sc /* Firmware Heimlich maneuver */
612
613 .align 4
614
615 mapok1: lwz r9,mmnext(r9) /* Look at the next one */
616 rlwinm r8,r9,0,27,31 ; Save the flags (including the lock)
617 rlwinm r9,r9,0,0,26 ; Clear out the flags from first link
618 cmplw r9,r3 /* Have we found ourselves? */
619 bne+ srchpmap /* Nope, get your head together... */
620
621 rlwimi r12,r8,0,27,31 ; Insert the lock and flags */
622 stw r12,mmnext(r10) /* Remove us from the queue */
623
624 mtmsr r0 /* Interrupts and translation back on */
625 isync
626 #if PERFTIMES && DEBUG
627 mflr r11
628 li r3,25
629 bl EXT(dbgLog2) ; End of hw_rem_map
630 mtlr r11
631 #endif
632 blr /* Return... */
633
634 .align 4
635
636 nophys: li r4,0 /* Make sure this is 0 */
637 sync /* Make sure that chain is updated */
638 stw r4,0(r7) /* Unlock the hash chain */
639 mtmsr r0 /* Interrupts and translation back on */
640 isync
641 #if PERFTIMES && DEBUG
642 mflr r11
643 li r3,25
644 bl EXT(dbgLog2) ; End of hw_rem_map
645 mtlr r11
646 #endif
647 blr /* Return... */
648
649
650 /*
651 * hw_prot(physent, prot) - Change the protection of a physical page
652 *
653 * Upon entry, R3 contains a pointer to a physical entry which is locked.
654 * R4 contains the PPC protection bits.
655 *
656 * The first thing we do is to slam the new protection into the phys entry.
657 * Then we scan the mappings and process each one.
658 *
659 * Acquire the lock on the PTEG hash list for the mapping being processed.
660 *
661 * If the current mapping has a PTE entry, we invalidate
662 * it and merge the reference and change information into the phys_entry.
663 *
664 * Next, slam the protection bits into the entry and unlock the hash list.
665 *
666 * Note that this must be done with both interruptions off and VM off
667 *
668 *
669 */
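/*
 * A C sketch of the walk (field names follow the mapping/phys_entry layout;
 * the helpers are stand-ins for the inline sequences below):
 *
 *	atomic_insert_pp(&pp->pte1, prot);	// lwarx/stwcx. loop: new PP bits in master copy
 *	for (mp = pp->phys_link; mp; mp = mp->next) {
 *		lock_pca(mp->PTEhash);
 *		if (mp->PTEent)
 *			invalidate_pte(mp);	// pulls the RC bits back as it goes
 *		mp->PTEr = (mp->PTEr & ~3) | (prot & 3);	// and in the mapping copy
 *		unlock_pca(mp->PTEhash);
 *	}
 */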
670
671 .align 5
672 .globl EXT(hw_prot)
673
674 LEXT(hw_prot)
675 #if PERFTIMES && DEBUG
676 mflr r11
677 mr r7,r3
678 // lwz r5,4(r3)
679 li r5,0x1111
680 li r3,26
681 bl EXT(dbgLog2) ; Start of hw_prot
682 mr r3,r7
683 mtlr r11
684 #endif
685 mfsprg r9,2 ; Get feature flags
686 mfmsr r0 /* Save the MSR */
687 li r5,pepte1 /* Get displacement to the second word of master pte */
688 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
689 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
690 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
691 mtcrf 0x04,r9 ; Set the features
692 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
693
694 bt pfNoMSRirb,hpNoMSR ; No MSR...
695
696 mtmsr r12 ; Translation and all off
697 isync ; Toss prefetch
698 b hpNoMSRx
699
700 hpNoMSR:
701 mr r10,r0
702 mr r7,r3
703 li r0,loadMSR ; Get the MSR setter SC
704 mr r3,r12 ; Get new MSR
705 sc ; Set it
706 mr r0,r10
707 mr r3,r7
708 hpNoMSRx:
709
710
711
712 lwz r10,pephyslink(r3) /* Get the first mapping block */
713 rlwinm r10,r10,0,0,26 ; Clear out the flags from first link
714
715 /*
716 * Note that we need to do the interlocked update here because another processor
717 * can be updating the reference and change bits even though the physical entry
718 * is locked. All modifications to the PTE portion of the physical entry must be
719 * done via interlocked update.
720 */
721
722 protcng: lwarx r8,r5,r3 /* Get the master copy */
723 rlwimi r8,r4,0,30,31 /* Move in the protection bits */
724 stwcx. r8,r5,r3 /* Save it back */
725 bne- protcng /* If it changed, try again... */
726
727
728
729 protnext: mr. r10,r10 /* Are there any more mappings? */
730 beq- protdone /* Naw... */
731
732 lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */
733 lwz r5,mmPTEv(r10) /* Get the virtual address */
734 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
735
736 li r12,1 /* Get the locked value */
737
738 protLck1: lwarx r11,0,r7 /* Get the PTEG lock */
739 mr. r11,r11 /* Is it locked? */
740 bne- protLckw1 /* Yeah... */
741 stwcx. r12,0,r7 /* Try to take it */
742 bne- protLck1 /* Someone else was trying, try again... */
743 b protSXg1 /* All done... */
744
745 .align 4
746
747 protLckw1: mr. r11,r11 /* Check if it's already held */
748 beq+ protLck1 /* It's clear... */
749 lwz r11,0(r7) /* Get lock word again... */
750 b protLckw1 /* Wait... */
751
752 .align 4
753
754 protSXg1: isync /* Make sure we haven't used anything yet */
755
756 lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */
757
758 rlwinm r9,r5,1,0,3 /* Move in the segment */
759 lwz r2,mmPTEr(r10) ; Get the mapping copy of the PTE
760 mr. r6,r6 /* See if there is a PTE here */
761 rlwinm r8,r5,31,2,25 /* Line it up */
762 rlwimi r2,r4,0,30,31 ; Move protection bits into the mapping copy
763
764 beq+ protul /* There's no PTE to invalidate... */
765
766 xor r8,r8,r6 /* Back hash to virt index */
767 rlwimi r9,r5,22,4,9 /* Move in the API */
768 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
769 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
770 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
771 mfspr r11,pvr /* Find out what kind of machine we are */
772 rlwimi r9,r8,6,10,19 /* Create the virtual address */
773 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
774
775 stw r5,0(r6) /* Make the PTE invalid */
776 cmplwi cr1,r11,3 /* Is this a 603? */
777 sync /* Make sure the invalid is stored */
778
779 tlbhangp: lwarx r11,0,r12 /* Get the TLBIE lock */
780 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
781 mr. r11,r11 /* Is it locked? */
782 lis r5,0x8000 /* Start up a bit mask */
783 li r11,1 /* Get our lock word */
784 bne- tlbhangp /* It's locked, go wait... */
785 stwcx. r11,0,r12 /* Try to get it */
786 bne- tlbhangp /* We was beat... */
787
788 li r11,0 /* Lock clear value */
789
790 tlbie r9 /* Invalidate it everywhere */
791
792 beq- cr1,its603p /* It's a 603, skip the tlbsync... */
793
794 eieio /* Make sure that the tlbie happens first */
795 tlbsync /* wait for everyone to catch up */
796 isync
797
798 its603p: stw r11,0(r12) /* Clear the lock */
799 srw r5,r5,r8 /* Make a "free slot" mask */
800 sync /* Make sure of it all */
801
802 lwz r6,4(r6) /* Get the latest reference and change bits */
803 stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */
804 rlwinm r6,r6,0,23,24 /* Extract the RC bits */
805 lwz r9,PCAallo(r7) /* Get the allocation control bits */
806 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
807 rlwimi r2,r6,0,23,24 ; Put the latest RC bit in mapping copy
808 or r9,r9,r5 /* Set the slot free */
809 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
810 andc r9,r9,r8 /* Clear the auto and lock bits */
811 li r5,pepte1 /* Get displacement to the second word of master pte */
812 stw r9,PCAallo(r7) /* Store the allocation controls */
813
814 protmod: lwarx r11,r5,r3 /* Get the master copy */
815 or r11,r11,r6 /* Merge in latest RC */
816 stwcx. r11,r5,r3 /* Save it back */
817 bne- protmod /* If it changed, try again... */
818
819 protul: li r4,0 /* Get a 0 */
820 stw r2,mmPTEr(r10) ; Save the updated mapping PTE
821 lwz r10,mmnext(r10) /* Get the next */
822
823 sync ; Make sure stores are complete
824
825 stw r4,0(r7) /* Unlock the hash chain */
826 b protnext /* Go get the next one */
827
828 .align 4
829
830 protdone: mtmsr r0 /* Interrupts and translation back on */
831 isync
832 #if PERFTIMES && DEBUG
833 mflr r11
834 li r3,27
835 bl EXT(dbgLog2) ; End of hw_prot
836 mtlr r11
837 #endif
838 blr /* Return... */
839
840
841 /*
842 * hw_prot_virt(mapping, prot) - Change the protection of single page
843 *
844 * Upon entry, R3 contains a pointer (real) to a mapping.
845 * R4 contains the PPC protection bits.
846 *
847 * Acquire the lock on the PTEG hash list for the mapping being processed.
848 *
849 * If the current mapping has a PTE entry, we invalidate
850 * it and merge the reference and change information into the phys_entry.
851 *
852 * Next, slam the protection bits into the entry, merge the RC bits,
853 * and unlock the hash list.
854 *
855 * Note that this must be done with both interruptions off and VM off
856 *
857 *
858 */
859
860 .align 5
861 .globl EXT(hw_prot_virt)
862
863 LEXT(hw_prot_virt)
864 #if PERFTIMES && DEBUG
865 mflr r11
866 mr r7,r3
867 // lwz r5,4(r3)
868 li r5,0x1111
869 li r3,40
870 bl EXT(dbgLog2) ; Start of hw_prot_virt
871 mr r3,r7
872 mtlr r11
873 #endif
874 mfsprg r9,2 ; Get feature flags
875 mfmsr r0 /* Save the MSR */
876 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
877 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
878 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
879 mtcrf 0x04,r9 ; Set the features
880 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
881
882 bt pfNoMSRirb,hpvNoMSR ; No MSR...
883
884 mtmsr r12 ; Translation and all off
885 isync ; Toss prefetch
886 b hpvNoMSRx
887
888 hpvNoMSR:
889 mr r5,r0
890 mr r7,r3
891 li r0,loadMSR ; Get the MSR setter SC
892 mr r3,r12 ; Get new MSR
893 sc ; Set it
894 mr r3,r7
895 mr r0,r5
896 hpvNoMSRx:
897
898
899
900 /*
901 * Note that we need to do the interlocked update here because another processor
902 * can be updating the reference and change bits even though the physical entry
903 * is locked. All modifications to the PTE portion of the physical entry must be
904 * done via interlocked update.
905 */
906
907 lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */
908 lwz r5,mmPTEv(r3) /* Get the virtual address */
909 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
910
911 li r12,1 /* Get the locked value */
912
913 protvLck1: lwarx r11,0,r7 /* Get the PTEG lock */
914 mr. r11,r11 /* Is it locked? */
915 bne- protvLckw1 /* Yeah... */
916 stwcx. r12,0,r7 /* Try to take it */
917 bne- protvLck1 /* Someone else was trying, try again... */
918 b protvSXg1 /* All done... */
919
920 .align 4
921
922 protvLckw1: mr. r11,r11 /* Check if it's already held */
923 beq+ protvLck1 /* It's clear... */
924 lwz r11,0(r7) /* Get lock word again... */
925 b protvLckw1 /* Wait... */
926
927 .align 4
928
929 protvSXg1: isync /* Make sure we haven't used anything yet */
930
931 lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */
932 lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part
933
934 rlwinm r9,r5,1,0,3 /* Move in the segment */
935 cmplwi cr7,r6,0 ; Any PTE to invalidate?
936 rlwimi r2,r4,0,30,31 ; Move in the new protection bits
937 rlwinm r8,r5,31,2,25 /* Line it up */
938
939 beq+ cr7,pvnophys /* There's no PTE to invalidate... */
940
941 xor r8,r8,r6 /* Back hash to virt index */
942 rlwimi r9,r5,22,4,9 /* Move in the API */
943 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
944 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
945 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
946 mfspr r11,pvr /* Find out what kind of machine we are */
947 rlwimi r9,r8,6,10,19 /* Create the virtual address */
948 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
949
950 stw r5,0(r6) /* Make the PTE invalid */
951 cmplwi cr1,r11,3 /* Is this a 603? */
952 sync /* Make sure the invalid is stored */
953
954 tlbhangpv: lwarx r11,0,r12 /* Get the TLBIE lock */
955 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
956 mr. r11,r11 /* Is it locked? */
957 lis r5,0x8000 /* Start up a bit mask */
958 li r11,1 /* Get our lock word */
959 bne- tlbhangpv /* It's locked, go wait... */
960 stwcx. r11,0,r12 /* Try to get it */
961 bne- tlbhangpv /* We was beat... */
962
963 li r11,0 /* Lock clear value */
964
965 tlbie r9 /* Invalidate it everywhere */
966
967 beq- cr1,its603pv /* It's a 603, skip the tlbsync... */
968
969 eieio /* Make sure that the tlbie happens first */
970 tlbsync /* wait for everyone to catch up */
971 isync
972
973 its603pv: stw r11,0(r12) /* Clear the lock */
974 srw r5,r5,r8 /* Make a "free slot" mask */
975 sync /* Make sure of it all */
976
977 lwz r6,4(r6) /* Get the latest reference and change bits */
978 stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */
979 rlwinm r6,r6,0,23,24 /* Extract the RC bits */
980 lwz r9,PCAallo(r7) /* Get the allocation control bits */
981 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
982 lwz r10,mmphysent(r3) ; Get any physical entry
983 or r9,r9,r5 /* Set the slot free */
984 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
985 andc r9,r9,r8 /* Clear the auto and lock bits */
986 mr. r10,r10 ; Is there a physical entry?
987 li r5,pepte1 /* Get displacement to the second word of master pte */
988 stw r9,PCAallo(r7) /* Store the allocation controls */
989 rlwimi r2,r6,0,23,24 ; Stick in RC bits
990 beq- pvnophys ; No physical entry...
991
992 protvmod: lwarx r11,r5,r10 /* Get the master copy */
993 or r11,r11,r6 /* Merge in latest RC */
994 stwcx. r11,r5,r10 /* Save it back */
995 bne- protvmod /* If it changed, try again... */
996
997 pvnophys: li r4,0 /* Get a 0 */
998 stw r2,mmPTEr(r3) ; Set the real part of the PTE
999
1000 sync ; Make sure everything is stored
1001
1002 stw r4,0(r7) /* Unlock the hash chain */
1003 mtmsr r0 ; Restore interrupts and translation
1004 isync
1005
1006 #if PERFTIMES && DEBUG
1007 mflr r11
1008 li r3,41
1009 bl EXT(dbgLog2)
1010 mtlr r11
1011 #endif
1012 blr /* Return... */
1013
1014
1015 /*
1016 * hw_attr_virt(mapping, attr) - Change the attributes of single page
1017 *
1018 * Upon entry, R3 contains a pointer (real) to a mapping.
1019 * R4 contains the WIMG bits.
1020 *
1021 * Acquire the lock on the PTEG hash list for the mapping being processed.
1022 *
1023 * If the current mapping has a PTE entry, we invalidate
1024 * it and merge the reference and change information into the phys_entry.
1025 *
1026 * Next, slam the WIMG bits into the entry, merge the RC bits,
1027 * and unlock the hash list.
1028 *
1029 * Note that this must be done with both interruptions off and VM off
1030 *
1031 *
1032 */
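/*
 * After the WIMG change the page is pushed out of the caches; as a sketch
 * (assuming the 32-byte cache line these loops step by):
 *
 *	for (off = 0; off < 4096; off += 32)		// push any dirty lines first
 *		__asm__ volatile("dcbst %0,%1" : : "b" (page), "r" (off));
 *	__asm__ volatile("sync");
 *	for (off = 0; off < 4096; off += 32) {		// then toss d- and i-cache copies
 *		__asm__ volatile("dcbi %0,%1" : : "b" (page), "r" (off));
 *		__asm__ volatile("icbi %0,%1" : : "b" (page), "r" (off));
 *	}
 *	__asm__ volatile("sync");
 */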
1033
1034 .align 5
1035 .globl EXT(hw_attr_virt)
1036
1037 LEXT(hw_attr_virt)
1038 #if PERFTIMES && DEBUG
1039 mflr r11
1040 mr r7,r3
1041 // lwz r5,4(r3)
1042 li r5,0x1111
1043 li r3,40
1044 bl EXT(dbgLog2) ; Start of hw_attr_virt
1045 mr r3,r7
1046 mtlr r11
1047 #endif
1048 mfsprg r9,2 ; Get feature flags
1049 mfmsr r0 /* Save the MSR */
1050 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1051 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1052 mtcrf 0x04,r9 ; Set the features
1053 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1054 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
1055
1056 bt pfNoMSRirb,havNoMSR ; No MSR...
1057
1058 mtmsr r12 ; Translation and all off
1059 isync ; Toss prefetch
1060 b havNoMSRx
1061
1062 havNoMSR:
1063 mr r5,r0
1064 mr r7,r3
1065 li r0,loadMSR ; Get the MSR setter SC
1066 mr r3,r12 ; Get new MSR
1067 sc ; Set it
1068 mr r3,r7
1069 mr r0,r5
1070 havNoMSRx:
1071
1072 /*
1073 * Note that we need to do the interlocked update here because another processor
1074 * can be updating the reference and change bits even though the physical entry
1075 * is locked. All modifications to the PTE portion of the physical entry must be
1076 * done via interlocked update.
1077 */
1078
1079 lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */
1080 lwz r5,mmPTEv(r3) /* Get the virtual address */
1081 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
1082
1083 li r12,1 /* Get the locked value */
1084
1085 attrvLck1: lwarx r11,0,r7 /* Get the PTEG lock */
1086 mr. r11,r11 /* Is it locked? */
1087 bne- attrvLckw1 /* Yeah... */
1088 stwcx. r12,0,r7 /* Try to take it */
1089 bne- attrvLck1 /* Someone else was trying, try again... */
1090 b attrvSXg1 /* All done... */
1091
1092 .align 4
1093
1094 attrvLckw1: mr. r11,r11 /* Check if it's already held */
1095 beq+ attrvLck1 /* It's clear... */
1096 lwz r11,0(r7) /* Get lock word again... */
1097 b attrvLckw1 /* Wait... */
1098
1099 .align 4
1100
1101 attrvSXg1: isync /* Make sure we haven't used anything yet */
1102
1103 lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */
1104 lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part
1105
1106 rlwinm r9,r5,1,0,3 /* Move in the segment */
1107 mr. r6,r6 /* See if there is a PTE here */
1108 rlwimi r2,r4,0,25,28 ; Move in the new attribute bits
1109 rlwinm r8,r5,31,2,25 /* Line it up */
1110
1111 beq+ avnophys /* There's no PTE to invalidate... */
1112
1113 xor r8,r8,r6 /* Back hash to virt index */
1114 rlwimi r9,r5,22,4,9 /* Move in the API */
1115 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
1116 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
1117 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
1118 mfspr r11,pvr /* Find out what kind of machine we are */
1119 rlwimi r9,r8,6,10,19 /* Create the virtual address */
1120 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
1121 stw r5,0(r6) /* Make the PTE invalid */
1122 cmplwi cr1,r11,3 /* Is this a 603? */
1123 sync /* Make sure the invalid is stored */
1124
1125 tlbhangav: lwarx r11,0,r12 /* Get the TLBIE lock */
1126 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
1127 mr. r11,r11 /* Is it locked? */
1128 lis r5,0x8000 /* Start up a bit mask */
1129 li r11,1 /* Get our lock word */
1130 bne- tlbhangav /* It's locked, go wait... */
1131 stwcx. r11,0,r12 /* Try to get it */
1132 bne- tlbhangav /* We was beat... */
1133
1134 li r11,0 /* Lock clear value */
1135
1136 tlbie r9 /* Invalidate it everywhere */
1137
1138 beq- cr1,its603av /* It's a 603, skip the tlbsync... */
1139
1140 eieio /* Make sure that the tlbie happens first */
1141 tlbsync /* wait for everyone to catch up */
1142 isync
1143
1144 its603av: stw r11,0(r12) /* Clear the lock */
1145 srw r5,r5,r8 /* Make a "free slot" mask */
1146 sync /* Make sure of it all */
1147
1148 lwz r6,4(r6) /* Get the latest reference and change bits */
1149 stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */
1150 rlwinm r6,r6,0,23,24 /* Extract the RC bits */
1151 lwz r9,PCAallo(r7) /* Get the allocation control bits */
1152 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
1153 lwz r10,mmphysent(r3) ; Get any physical entry
1154 or r9,r9,r5 /* Set the slot free */
1155 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
1156 andc r9,r9,r8 /* Clear the auto and lock bits */
1157 mr. r10,r10 ; Is there a physical entry?
1158 li r5,pepte1 /* Get displacement to the second word of master pte */
1159 stw r9,PCAallo(r7) /* Store the allocation controls */
1160 rlwimi r2,r6,0,23,24 ; Stick in RC bits
1161 beq- avnophys ; No physical entry...
1162
1163 attrvmod: lwarx r11,r5,r10 /* Get the master copy */
1164 or r11,r11,r6 /* Merge in latest RC */
1165 stwcx. r11,r5,r10 /* Save it back */
1166 bne- attrvmod /* If it changed, try again... */
1167
1168 avnophys: li r4,0 /* Get a 0 */
1169 stw r2,mmPTEr(r3) ; Set the real part of the PTE
1170
1171 sync ; Make sure that everything is updated
1172
1173 stw r4,0(r7) /* Unlock the hash chain */
1174
1175 rlwinm r2,r2,0,0,19 ; Clear back to page boundary
1176
1177 attrflsh: cmplwi r4,(4096-32) ; Are we about to do the last line on page?
1178 dcbst r2,r4 ; Flush cache because we changed attributes
1179 addi r4,r4,32 ; Bump up cache
1180 blt+ attrflsh ; Do the whole page...
1181 sync
1182
1183 li r4,0
1184 attrimvl: cmplwi r4,(4096-32) ; Are we about to do the last line on page?
1185 dcbi r2,r4 ; Invalidate dcache because we changed attributes
1186 icbi r2,r4 ; Invalidate icache because we changed attributes
1187 addi r4,r4,32 ; Bump up cache
1188 blt+ attrimvl ; Do the whole page...
1189 sync
1190
1191 mtmsr r0 ; Restore interrupts and translation
1192 isync
1193
1194 #if PERFTIMES && DEBUG
1195 mflr r11
1196 li r3,41
1197 bl EXT(dbgLog2)
1198 mtlr r11
1199 #endif
1200 blr /* Return... */
1201
1202
1203 /*
1204 * hw_pte_comm(physent) - Do something to the PTE pointing to a physical page
1205 *
1206 * Upon entry, R3 contains a pointer to a physical entry which is locked.
1207 * Note that this must be done with both interruptions off and VM off
1208 *
1209 * First, we set up CRs 5 and 7 to indicate which of the 7 calls this is.
1210 *
1211 * Now we scan the mappings to invalidate any with an active PTE.
1212 *
1213 * Acquire the lock on the PTEG hash list for the mapping being processed.
1214 *
1215 * If the current mapping has a PTE entry, we invalidate
1216 * it and merge the reference and change information into the phys_entry.
1217 *
1218 * Next, unlock the hash list and go on to the next mapping.
1219 *
1220 *
1221 *
1222 */
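/*
 * The seven entry points below just load a call-type code into R9 (and an RC
 * set/clear encoding into R2) and join here. As a C sketch of the shape
 * (names are stand-ins for the inline sequences that follow):
 *
 *	unsigned int pte_comm(phys_entry *pp, int op, int rc_ops) {
 *		for (mapping *mp = pp->phys_link; mp; mp = mp->next)
 *			pull_back_pte(mp);		// invalidate PTE, merge RC into copies
 *		if (op == OP_INV_ALL) return 0;
 *		if (op == OP_TST_REF || op == OP_TST_MOD)
 *			return test_bit(pp->pte1, op);	// R or C from the master copy
 *		atomic_update_rc(&pp->pte1, rc_ops);	// set/clear R and C as encoded
 *		return 0;
 *	}
 */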
1223
1224 .align 5
1225 .globl EXT(hw_inv_all)
1226
1227 LEXT(hw_inv_all)
1228
1229 li r9,0x800 /* Indicate invalidate all */
1230 li r2,0 ; No inadvertent modifications please
1231 b hw_pte_comm /* Join in the fun... */
1232
1233
1234 .align 5
1235 .globl EXT(hw_tst_mod)
1236
1237 LEXT(hw_tst_mod)
1238
1239 lwz r8,pepte1(r3) ; Get the saved PTE image
1240 li r9,0x400 /* Indicate test modify */
1241 li r2,0 ; No inadvertent modifications please
1242 rlwinm. r8,r8,25,31,31 ; Make change bit into return code
1243 beq+ hw_pte_comm ; Assume we do not know if it is set...
1244 mr r3,r8 ; Set the return code
1245 blr ; Return quickly...
1246
1247 .align 5
1248 .globl EXT(hw_tst_ref)
1249
1250 LEXT(hw_tst_ref)
1251 lwz r8,pepte1(r3) ; Get the saved PTE image
1252 li r9,0x200 /* Indicate test reference bit */
1253 li r2,0 ; No inadvertent modifications please
1254 rlwinm. r8,r8,24,31,31 ; Make reference bit into return code
1255 beq+ hw_pte_comm ; Assume we do not know if it is set...
1256 mr r3,r8 ; Set the return code
1257 blr ; Return quickly...
1258
1259 /*
1260 * Note that the following are all in one CR for ease of use later
1261 */
1262 .align 4
1263 .globl EXT(hw_set_mod)
1264
1265 LEXT(hw_set_mod)
1266
1267 li r9,0x008 /* Indicate set modify bit */
1268 li r2,0x4 ; Set C, clear none
1269 b hw_pte_comm /* Join in the fun... */
1270
1271
1272 .align 4
1273 .globl EXT(hw_clr_mod)
1274
1275 LEXT(hw_clr_mod)
1276
1277 li r9,0x004 /* Indicate clear modify bit */
1278 li r2,0x1 ; Set none, clear C
1279 b hw_pte_comm /* Join in the fun... */
1280
1281
1282 .align 4
1283 .globl EXT(hw_set_ref)
1284
1285 LEXT(hw_set_ref)
1286
1287 li r9,0x002 /* Indicate set reference */
1288 li r2,0x8 ; Set R, clear none
1289 b hw_pte_comm /* Join in the fun... */
1290
1291 .align 5
1292 .globl EXT(hw_clr_ref)
1293
1294 LEXT(hw_clr_ref)
1295
1296 li r9,0x001 /* Indicate clear reference bit */
1297 li r2,0x2 ; Set none, clear R
1298 b hw_pte_comm /* Join in the fun... */
1299
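/*
 * The R2 encoding used by the set/clear entries above, as a sketch (the
 * rlwinm's in commulnl/commcng rotate these nibble bits up to the R and C
 * positions, bits 23 and 24 of the low PTE word):
 *
 *	set_mask   = ((r2 & 0x8) ? PTE_R : 0) | ((r2 & 0x4) ? PTE_C : 0);
 *	clear_mask = ((r2 & 0x2) ? PTE_R : 0) | ((r2 & 0x1) ? PTE_C : 0);
 *	new_rc = (old_rc | set_mask) & ~clear_mask;	// applied with lwarx/stwcx.
 */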
1300
1301 /*
1302 * This is the common stuff.
1303 */
1304
1305 .align 5
1306
1307 hw_pte_comm: /* Common routine for pte tests and manips */
1308
1309 #if PERFTIMES && DEBUG
1310 mflr r11
1311 mr r7,r3
1312 lwz r4,4(r3)
1313 mr r5,r9
1314 li r3,28
1315 bl EXT(dbgLog2) ; Start of hw_pte_comm
1316 mr r3,r7
1317 mtlr r11
1318 #endif
1319 mfsprg r8,2 ; Get feature flags
1320 lwz r10,pephyslink(r3) /* Get the first mapping block */
1321 mfmsr r0 /* Save the MSR */
1322 rlwinm. r10,r10,0,0,26 ; Clear out the flags from first link and see if we are mapped
1323 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1324 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1325 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1326 mtcrf 0x04,r8 ; Set the features
1327 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
1328 beq- comnmap ; No mapping
1329 dcbt br0,r10 ; Touch the first mapping in before the isync
1330
1331 comnmap:
1332
1333 bt pfNoMSRirb,hpcNoMSR ; No MSR...
1334
1335 mtmsr r12 ; Translation and all off
1336 isync ; Toss prefetch
1337 b hpcNoMSRx
1338
1339 hpcNoMSR:
1340 mr r5,r0
1341 mr r7,r3
1342 li r0,loadMSR ; Get the MSR setter SC
1343 mr r3,r12 ; Get new MSR
1344 sc ; Set it
1345 mr r3,r7
1346 mr r0,r5
1347 hpcNoMSRx:
1348
1349 mtcrf 0x05,r9 /* Set the call type flags into cr5 and 7 */
1350
1351 beq- commdone ; Nothing is mapped to this page...
1352 b commnext ; Jump to first pass (jump here so we can align loop)
1353
1354 .align 5
1355
1356 commnext: lwz r11,mmnext(r10) ; Get the pointer to the next mapping (if any)
1357 lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */
1358 lwz r5,mmPTEv(r10) /* Get the virtual address */
1359 mr. r11,r11 ; More mappings to go?
1360 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
1361 beq- commnxtch ; No more mappings...
1362 dcbt br0,r11 ; Touch the next mapping
1363
1364 commnxtch: li r12,1 /* Get the locked value */
1365
1366 commLck1: lwarx r11,0,r7 /* Get the PTEG lock */
1367 mr. r11,r11 /* Is it locked? */
1368 bne- commLckw1 /* Yeah... */
1369 stwcx. r12,0,r7 /* Try to take it */
1370 bne- commLck1 /* Someone else was trying, try again... */
1371 b commSXg1 /* All done... */
1372
1373 .align 4
1374
1375 commLckw1: mr. r11,r11 /* Check if it's already held */
1376 beq+ commLck1 /* It's clear... */
1377 lwz r11,0(r7) /* Get lock word again... */
1378 b commLckw1 /* Wait... */
1379
1380 .align 4
1381
1382 commSXg1: isync /* Make sure we haven't used anything yet */
1383
1384 lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */
1385
1386 rlwinm r9,r5,1,0,3 /* Move in the segment */
1387 mr. r6,r6 /* See if there is a PTE entry here */
1388 rlwinm r8,r5,31,2,25 /* Line it up */
1389
1390 beq+ commul /* There's no PTE to invalidate... */
1391
1392 xor r8,r8,r6 /* Back hash to virt index */
1393 rlwimi r9,r5,22,4,9 /* Move in the API */
1394 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
1395 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
1396 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
1397 rlwimi r9,r8,6,10,19 /* Create the virtual address */
1398
1399 stw r5,0(r6) /* Make the PTE invalid */
1400 mfspr r4,pvr /* Find out what kind of machine we are */
1401 sync /* Make sure the invalid is stored */
1402
1403 tlbhangco: lwarx r11,0,r12 /* Get the TLBIE lock */
1404 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
1405 mr. r11,r11 /* Is it locked? */
1406 lis r5,0x8000 /* Start up a bit mask */
1407 li r11,1 /* Get our lock word */
1408 bne- tlbhangco /* It's locked, go wait... */
1409 stwcx. r11,0,r12 /* Try to get it */
1410 bne- tlbhangco /* We was beat... */
1411
1412 rlwinm r4,r4,16,16,31 /* Isolate CPU type */
1413 li r11,0 /* Lock clear value */
1414 cmplwi r4,3 /* Is this a 603? */
1415
1416 tlbie r9 /* Invalidate it everywhere */
1417
1418 beq- its603co /* It's a 603, skip the tlbsync... */
1419
1420 eieio /* Make sure that the tlbie happens first */
1421 tlbsync /* wait for everyone to catch up */
1422 isync
1423
1424 its603co: stw r11,0(r12) /* Clear the lock */
1425 srw r5,r5,r8 /* Make a "free slot" mask */
1426 sync /* Make sure of it all */
1427
1428 lwz r6,4(r6) /* Get the latest reference and change bits */
1429 lwz r9,PCAallo(r7) /* Get the allocation control bits */
1430 stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */
1431 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
1432 or r9,r9,r5 /* Set the slot free */
1433 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
1434 rlwinm r4,r6,0,23,24 /* Extract the RC bits */
1435 andc r9,r9,r8 /* Clear the auto and lock bits */
1436 li r5,pepte1 /* Get displacement to the second word of master pte */
1437 stw r9,PCAallo(r7) /* Store the allocation controls */
1438
1439 commmod: lwarx r11,r5,r3 /* Get the master copy */
1440 or r11,r11,r4 /* Merge in latest RC */
1441 stwcx. r11,r5,r3 /* Save it back */
1442 bne- commmod /* If it changed, try again... */
1443 b commulnl ; Skip loading the old real part...
1444
1445 commul: lwz r6,mmPTEr(r10) ; Get the real part
1446
1447 commulnl: rlwinm r12,r2,5,23,24 ; Get the "set" bits
1448 rlwinm r11,r2,7,23,24 ; Get the "clear" bits
1449
1450 or r6,r6,r12 ; Set the bits to come on
1451 andc r6,r6,r11 ; Clear those to come off
1452
1453 stw r6,mmPTEr(r10) ; Set the new RC
1454
1455 lwz r10,mmnext(r10) /* Get the next */
1456 li r4,0 /* Make sure this is 0 */
1457 mr. r10,r10 ; Is there another mapping?
1458
1459 sync ; Make sure that all is saved
1460
1461 stw r4,0(r7) /* Unlock the hash chain */
1462 bne+ commnext ; Go get the next if there is one...
1463
1464 /*
1465 * Now that all PTEs have been invalidated and the master RC bits are updated,
1466 * we go ahead and figure out what the original call was and do that. Note that
1467 * another processor could be messing around and may have entered one of the
1468 * PTEs we just removed into the hash table. Too bad... You takes yer chances.
1469 * If there's a problem with that, it's because some higher level was trying to
1470 * do something with a mapping that it shouldn't. So, the problem's really
1471 * there, nyaaa, nyaaa, nyaaa... nyaaa, nyaaa... nyaaa! So there!
1472 */
1473
1474 commdone: li r5,pepte1 /* Get displacement to the second word of master pte */
1475 blt cr5,commfini /* We're finished, it was invalidate all... */
1476 bgt cr5,commtst /* It was a test modified... */
1477 beq cr5,commtst /* It was a test reference... */
1478
1479 /*
1480 * Note that we need to do the interlocked update here because another processor
1481 * can be updating the reference and change bits even though the physical entry
1482 * is locked. All modifications to the PTE portion of the physical entry must be
1483 * done via interlocked update.
1484 */
1485
1486 rlwinm r12,r2,5,23,24 ; Get the "set" bits
1487 rlwinm r11,r2,7,23,24 ; Get the "clear" bits
1488
1489 commcng: lwarx r8,r5,r3 /* Get the master copy */
1490 or r8,r8,r12 ; Set the bits to come on
1491 andc r8,r8,r11 ; Clear those to come off
1492 stwcx. r8,r5,r3 /* Save it back */
1493 bne- commcng /* If it changed, try again... */
1494
1495 mtmsr r0 /* Interrupts and translation back on */
1496 isync
1497 #if PERFTIMES && DEBUG
1498 mflr r11
1499 mr r4,r3
1500 li r3,29
1501 bl EXT(dbgLog2) ; End of hw_pte_comm
1502 mr r3,r4
1503 mtlr r11
1504 #endif
1505 blr /* Return... */
1506
1507 .align 4
1508
1509 commtst: lwz r8,pepte1(r3) /* Get the PTE */
1510 bne- cr5,commtcb ; This is for the change bit...
1511 mtmsr r0 ; Interrupts and translation back on
1512 rlwinm r3,r8,24,31,31 ; Copy reference bit to bit 31
1513 isync ; Toss prefetching
1514 #if PERFTIMES && DEBUG
1515 mflr r11
1516 mr r4,r3
1517 li r3,29
1518 bl EXT(dbgLog2) ; End of hw_pte_comm
1519 mr r3,r4
1520 mtlr r11
1521 #endif
1522 blr ; Return...
1523
1524 .align 4
1525
1526 commtcb: rlwinm r3,r8,25,31,31 ; Copy change bit to bit 31
1527
1528 commfini: mtmsr r0 ; Interrupts and translation back on
1529 isync ; Toss prefetching
1530
1531 #if PERFTIMES && DEBUG
1532 mflr r11
1533 mr r4,r3
1534 li r3,29
1535 bl EXT(dbgLog2) ; End of hw_pte_comm
1536 mr r3,r4
1537 mtlr r11
1538 #endif
1539 blr ; Return...
1540
1541 /*
1542 * unsigned int hw_test_rc(mapping *mp, boolean_t reset);
1543 *
1544 * Test the RC bits for a specific mapping. If reset is non-zero, clear them.
1545 * We return the RC value in the mapping if there is no PTE or if C is set.
1546 * (Note: R is always set with C.) Otherwise we invalidate the PTE and
1547 * collect the RC bits from there, also merging them into the global copy.
1548 *
1549 * For now, we release the PTE slot and leave it invalid. In the future, we
1550 * may consider re-validating and not releasing the slot. It would be faster,
1551 * but our current implementation says that we will have no PTEs valid
1552 * without the reference bit set.
1553 *
1554 * We will special case C==1 && not reset to just return the RC.
1555 *
1556 * Probable state is worst performance state: C bit is off and there is a PTE.
1557 */
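/*
 * As a C sketch of the contract (bit positions follow the rlwinm's below;
 * pull_back_pte is a stand-in for the invalidate-and-merge sequence):
 *
 *	unsigned int hw_test_rc(mapping *mp, boolean_t reset) {
 *		unsigned int rc = (mp->PTEr >> 7) & 3;	// R in bit 1, C in bit 0
 *		if ((rc & 1) && !reset)
 *			return rc;			// C already set, no reset: fast path
 *		pull_back_pte(mp);			// invalidate the PTE, merge its RC in
 *		rc = (mp->PTEr >> 7) & 3;
 *		if (reset)
 *			mp->PTEr &= ~0x180;		// clear R and C if asked
 *		return rc;
 *	}
 */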
1558
1559 #define htrReset 31
1560
1561 .align 5
1562 .globl EXT(hw_test_rc)
1563
1564 LEXT(hw_test_rc)
1565
1566 mfsprg r9,2 ; Get feature flags
1567 mfmsr r0 ; Save the MSR
1568 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1569 mr. r4,r4 ; See if we have a reset to do later
1570 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1571 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruption mask
1572 crnot htrReset,cr0_eq ; Remember reset
1573 mtcrf 0x04,r9 ; Set the features
1574 rlwinm r12,r12,0,28,25 ; Clear IR and DR
1575
1576 bt pfNoMSRirb,htrNoMSR ; No MSR...
1577
1578 mtmsr r12 ; Translation and all off
1579 isync ; Toss prefetch
1580 b htrNoMSRx
1581
1582 htrNoMSR:
1583 mr r2,r0
1584 mr r7,r3
1585 li r0,loadMSR ; Get the MSR setter SC
1586 mr r3,r12 ; Get new MSR
1587 sc ; Set it
1588 mr r3,r7
1589 mr r0,r2
1590 htrNoMSRx:
1591
1592 lwz r2,mmPTEr(r3) ; Get the real part
1593 lwz r7,mmPTEhash(r3) ; Get pointer to hash list anchor
1594 rlwinm. r12,r2,0,24,24 ; Is the change bit on?
1595 lwz r5,mmPTEv(r3) ; Get the virtual address
1596 crnor cr0_eq,cr0_eq,htrReset ; Set if C=1 && not reset
1597 rlwinm r7,r7,0,0,25 ; Round hash list down to PCA boundary
1598 bt cr0_eq,htrcset ; Special case changed but no reset case...
1599
1600 li r12,1 ; Get the locked value
1601
1602 htrLck1: lwarx r11,0,r7 ; Get the PTEG lock
1603 mr. r11,r11 ; Is it locked?
1604 bne- htrLckw1 ; Yeah...
1605 stwcx. r12,0,r7 ; Try to take it
1606 bne- htrLck1 ; Someone else was trying, try again...
1607 b htrSXg1 ; All done...
1608
1609 .align 4
1610
1611 htrLckw1: mr. r11,r11 ; Check if it is already held
1612 beq+ htrLck1 ; It is clear...
1613 lwz r11,0(r7) ; Get lock word again...
1614 b htrLckw1 ; Wait...
1615
1616 .align 4
1617
1618 htrSXg1: isync ; Make sure we have not used anything yet
1619
1620 lwz r6,mmPTEent(r3) ; Get the pointer to the PTE now that the lock is set
1621 lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part
1622
1623 rlwinm r9,r5,1,0,3 ; Move in the segment
1624 mr. r6,r6 ; Any PTE to invalidate?
1625 rlwinm r8,r5,31,2,25 ; Line it up
1626
1627 beq+ htrnopte ; There is no PTE to invalidate...
1628
1629 xor r8,r8,r6 ; Back hash to virt index
1630 rlwimi r9,r5,22,4,9 ; Move in the API
1631 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock
1632 rlwinm r5,r5,0,1,31 ; Clear the valid bit
1633 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part
1634 mfspr r11,pvr ; Find out what kind of machine we are
1635 rlwimi r9,r8,6,10,19 ; Create the virtual address
1636 rlwinm r11,r11,16,16,31 ; Isolate CPU type
1637
1638 stw r5,0(r6) ; Make the PTE invalid
1639 cmplwi cr1,r11,3 ; Is this a 603?
1640 sync ; Make sure the invalid is stored
1641
1642 htrtlbhang: lwarx r11,0,r12 ; Get the TLBIE lock
1643 rlwinm r8,r6,29,29,31 ; Get the bit position of entry
1644 mr. r11,r11 ; Is it locked?
1645 lis r5,0x8000 ; Start up a bit mask
1646 li r11,1 ; Get our lock word
1647 bne- htrtlbhang ; It is locked, go wait...
1648 stwcx. r11,0,r12 ; Try to get it
1649 bne- htrtlbhang ; We was beat...
1650
1651 li r11,0 ; Lock clear value
1652
1653 tlbie r9 ; Invalidate it everywhere
1654
1655 beq- cr1,htr603 ; It is a 603, skip the tlbsync...
1656
1657 eieio ; Make sure that the tlbie happens first
1658 tlbsync ; wait for everyone to catch up
1659 isync
1660
1661 htr603: stw r11,0(r12) ; Clear the lock
1662 srw r5,r5,r8 ; Make a "free slot" mask
1663 sync ; Make sure of it all
1664
1665 lwz r6,4(r6) ; Get the latest reference and change bits
1666 stw r11,mmPTEent(r3) ; Clear the pointer to the PTE
1667 rlwinm r6,r6,0,23,24 ; Extract the RC bits
1668 lwz r9,PCAallo(r7) ; Get the allocation control bits
1669 rlwinm r8,r5,24,8,15 ; Make the autogen bit to turn off
1670 lwz r10,mmphysent(r3) ; Get any physical entry
1671 or r9,r9,r5 ; Set the slot free
1672 rlwimi r8,r8,24,16,23 ; Get lock bit mask to turn it off
1673 andc r9,r9,r8 ; Clear the auto and lock bits
1674 mr. r10,r10 ; Is there a physical entry?
1675 li r5,pepte1 ; Get displacement to the second word of master pte
1676 stw r9,PCAallo(r7) ; Store the allocation controls
1677 rlwimi r2,r6,0,23,24 ; Stick in RC bits
1678 beq- htrnopte ; No physical entry...
1679
1680 htrmrc: lwarx r11,r5,r10 ; Get the master copy
1681 or r11,r11,r6 ; Merge in latest RC
1682 stwcx. r11,r5,r10 ; Save it back
1683 bne- htrmrc ; If it changed, try again...
1684
1685 htrnopte: rlwinm r5,r2,25,30,31 ; Position RC and mask off
1686 bf htrReset,htrnorst ; No reset to do...
1687 rlwinm r2,r2,0,25,22 ; Clear the RC if requested
1688
1689 htrnorst: li r4,0 ; Get a 0
1690 stw r2,mmPTEr(r3) ; Set the real part of the PTE
1691
1692 sync ; Make sure that stuff is all stored
1693
1694 stw r4,0(r7) ; Unlock the hash chain
1695
1696 mr r3,r5 ; Get the old RC to pass back
1697 mtmsr r0 ; Restore interrupts and translation
1698 isync
1699 blr ; Return...
1700
1701 .align 4
1702
1703 htrcset: rlwinm r3,r2,25,30,31 ; Position RC and mask off
1704 mtmsr r0 ; Restore interrupts and translation
1705 isync
1706 blr ; Return...
1707
1708
1709 /*
1710 * hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - Sets the default physical page attributes
1711 *
1712 * Note that this must be done with both interruptions off and VM off
1713 * Move the passed in attributes into the pte image in the phys entry
1714 *
1715 *
1716 */
1717
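/*
 * Sketch only (assumed helper names): the lwarx/stwcx. loop below is the
 * equivalent of this C, where pte1 is the second word of the master PTE in
 * the phys_entry and compare_and_swap() stands in for the reservation pair:
 *
 *	void hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {
 *		unsigned int new = (wimg & 0x78) | (prot & 3);	// WIMG plus PP bits
 *		unsigned int old;
 *		do {
 *			old = pp->pte1;
 *		} while (!compare_and_swap(&pp->pte1, old, (old & ~0x7F) | new));
 *	}
 */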
1718 .align 5
1719 .globl EXT(hw_phys_attr)
1720
1721 LEXT(hw_phys_attr)
1722
1723 #if PERFTIMES && DEBUG
1724 mflr r11
1725 mr r8,r3
1726 mr r7,r5
1727 mr r5,r4
1728 // lwz r4,4(r3)
1729 li r4,0x1111
1730 li r3,30
1731 bl EXT(dbgLog2) ; Cut a performance trace entry
1732 mr r3,r8
1733 mr r4,r5
1734 mr r5,r7
1735 mtlr r11
1736 #endif
1737 mfsprg r9,2 ; Get feature flags
1738 mfmsr r0 /* Save the MSR */
1739 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1740 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1741 andi. r5,r5,0x0078 /* Clean up the WIMG */
1742 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1743 mtcrf 0x04,r9 ; Set the features
1744 rlwimi r5,r4,0,30,31 /* Move the protection into the wimg register */
1745 la r6,pepte1(r3) /* Point to the default pte */
1746 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
1747
1748 bt pfNoMSRirb,hpaNoMSR ; No MSR...
1749
1750 mtmsr r12 ; Translation and all off
1751 isync ; Toss prefetch
1752 b hpaNoMSRx
1753
1754 hpaNoMSR:
1755 mr r10,r0
1756 mr r4,r3
1757 li r0,loadMSR ; Get the MSR setter SC
1758 mr r3,r12 ; Get new MSR
1759 sc ; Set it
1760 mr r3,r4
1761 mr r0,r10
1762 hpaNoMSRx:
1763
1764 atmattr: lwarx r10,0,r6 /* Get the pte */
1765 rlwimi r10,r5,0,25,31 /* Move in the new attributes */
1766 stwcx. r10,0,r6 /* Try it on for size */
1767 bne- atmattr /* Someone else was trying, try again... */
1768
1769 mtmsr r0 /* Interrupts and translation back on */
1770 isync
1771 #if PERFTIMES && DEBUG
1772 mflr r11
1773 mr r4,r10
1774 li r3,31
1775 bl EXT(dbgLog2) ; Cut a performance trace entry
1776 mtlr r11
1777 #endif
1778 blr /* All done... */
1779
1780
1781
1782 /*
1783 * handlePF - handle a page fault interruption
1784 *
1785 * If the fault can be handled, this routine will RFI directly,
1786 * otherwise it will return with all registers as in entry.
1787 *
1788 * Upon entry, state and all registers have been saved in savearea.
1789 * This is pointed to by R13.
1790 * IR and DR are off, interrupts are masked,
1791 * Floating point is disabled.
1792 * R3 is the interrupt code.
1793 *
1794 * If we bail, we must restore cr5, and all registers except R6 and
1795 * R3.
1796 *
1797 */
1798
1799 .align 5
1800 .globl EXT(handlePF)
1801
1802 LEXT(handlePF)
1803
1804 /*
1805 * This first part does a quick check to see if we can handle the fault.
1806 * We can't handle any kind of protection exceptions here, so we pass
1807 * them up to the next level.
1808 *
1809 * The mapping lists are kept in MRS (most recently stolen)
1810 * order on queues anchored within from the
1811 * PTEG to which the virtual address hashes. This is further segregated by
1812 * the low-order 3 bits of the VSID XORed with the segment number and XORed
1813 * with bits 4-7 of the vaddr in an attempt to keep the searches
1814 * short.
1815 *
1816 * MRS is handled by moving the entry to the head of its list when stolen in the
1817 * assumption that it will be revalidated soon. Entries are created on the head
1818 * of the list because they will be used again almost immediately.
1819 *
1820 * We need R13 set to the savearea, R3 set to the interrupt code, and R2
1821 * set to the per_proc.
1822 *
1823 * NOTE: In order for a page-fault redrive to work, the translation miss
1824 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
1825 * before we come here.
1826 */
1827
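/*
 * Sketch (assumed names) of the two hashes computed below. The primary hash
 * is the architected PTEG hash; the look-aside index roughly folds the
 * segment number, vaddr bits 4-7, and the low three bits of the VSID to
 * pick one of the eight mapping lists hung off the PCA:
 *
 *	hash = ((vsid & 0x7FFFF) ^ ((va >> 12) & 0xFFFF)) & htabmask;	// PTEG index
 *	pca = &pca_base[hash];				// the PCA sits just above the hash table
 *	idx = ((va >> 28) ^ (va >> 24) ^ vsid) & 7;	// look-aside subhash
 *	chain = &pca->PCAhash[idx];
 */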
1828 cmplwi r3,T_INSTRUCTION_ACCESS /* See if this is for the instruction */
1829 lwz r8,savesrr1(r13) ; Get the MSR to determine mode
1830 beq- gotIfetch ; We have an IFETCH here...
1831
1832 lwz r7,savedsisr(r13) /* Get the DSISR */
1833 lwz r6,savedar(r13) /* Get the fault address */
1834 b ckIfProt ; Go check if this is a protection fault...
1835
1836 gotIfetch: mr r7,r8 ; IFETCH info is in SRR1
1837 lwz r6,savesrr0(r13) /* Get the instruction address */
1838
1839 ckIfProt: rlwinm. r7,r7,0,1,1 ; Is this a protection exception?
1840 beqlr- ; Yes... (probably not though)
1841
1842 /*
1843 * We will need to restore registers if we bail after this point.
1844 * Note that at this point several SRs have been changed to the kernel versions.
1845 * Therefore, for these we must build these values.
1846 */
1847
1848 #if PERFTIMES && DEBUG
1849 mflr r11
1850 mr r5,r6
1851 mr r4,r3
1852 li r3,32
1853 bl EXT(dbgLog2) ; Cut a performance trace entry
1854 mr r3,r4
1855 mtlr r11
1856 mfsprg r2,0
1857 #endif
1858 lwz r3,PP_USERPMAP(r2) ; Get the user pmap (not needed if kernel access, but optimize for user??)
1859 rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Supervisor state access?
1860 rlwinm r5,r6,6,26,29 ; Get index to the segment slot
1861 eqv r1,r1,r1 ; Fill the bottom with foxes
1862 bne+ notsuper ; Go do the user mode interrupt stuff...
1863
1864 cmplwi cr1,r5,SR_COPYIN_NUM*4 ; See if this is the copyin/copyout segment
1865 rlwinm r3,r6,24,8,11 ; Make the kernel VSID
1866 bne+ cr1,havevsid ; We are done if we do not want the copyin/out guy...
1867
1868 mfsr r3,SR_COPYIN ; Get the copy vsid
1869 b havevsid ; Join up...
1870
1871 .align 5
1872
1873 notsuper: addi r5,r5,PMAP_SEGS ; Get offset to table
1874 lwzx r3,r3,r5 ; Get the VSID
1875
1876 havevsid: mfspr r5,sdr1 /* Get hash table base and size */
1877 cror cr1_eq,cr0_eq,cr0_eq ; Remember if kernel fault for later
1878 rlwinm r9,r6,2,2,5 ; Move nybble 1 up to 0 (keep aligned with VSID)
1879 rlwimi r1,r5,16,0,15 /* Make table size -1 out of mask */
1880 rlwinm r3,r3,6,2,25 /* Position the space for the VSID */
1881 rlwinm r7,r6,26,10,25 /* Isolate the page index */
1882 xor r9,r9,r3 ; Splooch vaddr nybble 0 (from VSID) and 1 together
1883 or r8,r5,r1 /* Point to the last byte in table */
1884 xor r7,r7,r3 /* Get primary hash */
1885 rlwinm r3,r3,1,1,24 /* Position VSID for pte ID */
1886 addi r8,r8,1 /* Point to the PTEG Control Area */
1887 rlwinm r9,r9,8,27,29 ; Get splooched bits in place
1888 and r7,r7,r1 /* Wrap the hash */
1889 rlwimi r3,r6,10,26,31 /* Move API into pte ID */
1890 add r8,r8,r7 /* Point to our PCA entry */
1891 rlwinm r12,r3,27,27,29 ; Get low 3 bits of the VSID for look-aside hash
1892 la r11,PCAhash(r8) /* Point to the mapping hash area */
1893 xor r9,r9,r12 ; Finish splooching nybble 0, 1, and the low bits of the VSID
1894
1895
1896 /*
1897 * We have about as much as we need to start searching the autogen (aka block maps)
1898 * and mappings. From here on, any kind of failure will bail, and
1899 * contention will either bail or restart from here.
1900 *
1901 *
1902 */
1903
1904 li r12,1 /* Get the locked value */
1905 dcbt 0,r11 /* We'll need the hash area in a sec, so get it */
1906 add r11,r11,r9 /* Point to the right mapping hash slot */
1907
1908 ptegLck: lwarx r10,0,r8 /* Get the PTEG lock */
1909 mr. r10,r10 /* Is it locked? */
1910 bne- ptegLckw /* Yeah... */
1911 stwcx. r12,0,r8 /* Try to take it */
1912 bne- ptegLck /* Someone else was trying, try again... */
1913 b ptegSXg /* All done... */
1914
1915 .align 4
1916
1917 ptegLckw: mr. r10,r10 /* Check if it's already held */
1918 beq+ ptegLck /* It's clear... */
1919 lwz r10,0(r8) /* Get lock word again... */
1920 b ptegLckw /* Wait... */
1921
1922 .align 5
1923
1924 nop ; Force ISYNC to last instruction in IFETCH
1925 nop
1926 nop
1927
1928 ptegSXg: isync /* Make sure we haven't used anything yet */
1929
1930 lwz r9,0(r11) /* Pick up first mapping block */
1931 mr r5,r11 /* Get the address of the anchor */
1932 mr r7,r9 /* Save the first in line */
1933 b findmap ; Take space and force loop to cache line
1934
1935 findmap: mr. r12,r9 /* Are there more? */
1936 beq- tryAuto /* Nope, nothing in mapping list for us... */
1937
1938 lwz r10,mmPTEv(r12) /* Get unique PTE identification */
1939 lwz r9,mmhashnext(r12) /* Get the chain, just in case */
1940 cmplw r10,r3 /* Did we hit our PTE? */
1941 lwz r0,mmPTEent(r12) /* Get the pointer to the hash table entry */
1942 mr r5,r12 /* Save the current as previous */
1943 bne- findmap ; Nothing here, try the next...
1944
1945 ; Cache line boundary here
1946
1947 cmplwi cr1,r0,0 /* Is there actually a PTE entry in the hash? */
1948 lwz r2,mmphysent(r12) /* Get the physical entry */
1949 bne- cr1,MustBeOK /* There's an entry in the hash table, so this must
1950 have been taken care of already... */
1951 lis r4,0x8000 ; Tell PTE inserter that this was not an auto
1952 cmplwi cr2,r2,0 /* Is there a physical entry? */
1953 li r0,0x0100 /* Force on the reference bit whenever we make a PTE valid */
1954 bne+ cr2,gotphys /* Skip down if we have a physical entry */
1955 li r0,0x0180 /* When there is no physical entry, force on
1956 both R and C bits to keep hardware from
1957 updating the PTE to set them. We don't
1958 keep track of RC for I/O areas, so this is ok */
1959
1960 gotphys: lwz r2,mmPTEr(r12) ; Get the second part of the PTE
1961 b insert /* Go insert into the PTEG... */
1962
1963 MustBeOK: li r10,0 /* Get lock clear value */
1964 li r3,T_IN_VAIN /* Say that we handled it */
1965 stw r10,PCAlock(r8) /* Clear the PTEG lock */
1966
1967 #if PERFTIMES && DEBUG
1968 mflr r11
1969 mr r4,r3
1970 li r3,33
1971 bl EXT(dbgLog2) ; Cut a performance trace entry
1972 mr r3,r4
1973 mtlr r11
1974 #endif
1975 blr /* Blow back and handle exception */
1976
1977
1978
1979 /*
1980 * We couldn't find it in the mapping list. As a last try, we will
1981 * see if we can autogen it from the block mapped list.
1982 *
1983 * A block mapped area is defined as a contiguous virtual area that is mapped to
1984 * a contiguous physical area. The olde-tyme IBM VM/XA Interpretive Execution
1985 * architecture referred to this as a V=F, or Virtual = Fixed area.
1986 *
1987 * We consider a V=F area to be a single entity; adjacent areas cannot be merged
1988 * or overlapped. The protection and memory attributes are the same and reference
1989 * and change indications are not kept. The areas are not considered part of the
1990 * physical RAM of the machine and do not have any associated physical table
1991 * entries. Their primary use is intended for mapped I/O areas (e.g., framebuffers)
1992 * although certain areas of RAM, such as the kernel V=R memory, can be mapped.
1993 *
1994 * We also have a problem in the case of copyin/out: that access is done
1995 * within the kernel for a user address. Unfortunately, the user isn't
1996 * necessarily the current guy. That means that we don't have access to the
1997 * right autogen list. We can't support this kind of access. So, we need to do
1998 * a quick check here and cause a fault if an attempt is made to copyin or
1999 * out to any autogenned area.
2000 *
2001 * The lists must be kept short.
2002 *
2003 * NOTE: kernel_pmap_store must be in V=R storage!!!!!!!!!!!!!!
2004 */
2005
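/*
 * Sketch (assumed names) of the search done below once the chain is locked:
 * walk the pmap's blokmap list for an entry whose inclusive [bmstart, bmend]
 * range covers the fault address, then autogen the real half of the PTE:
 *
 *	for (bm = pmap->bmaps; bm != NULL; bm = bm->bmnext) {
 *		if (va < bm->bmstart || va > bm->bmend)
 *			continue;			// not this one
 *		if (bm->blkFlags & blkRem)
 *			break;				// partially removed: treat as a real fault
 *		return bm->bmPTEr + ((va & ~0xFFF) - bm->bmstart);	// real half of the PTE
 *	}
 *	// fall through to realFault: nothing can handle it here
 */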
2006 .align 5
2007
2008 tryAuto: rlwinm. r11,r3,0,5,24 ; Check if this is a kernel VSID
2009 lis r10,HIGH_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the top part of kernel block map anchor
2010 crandc cr0_eq,cr1_eq,cr0_eq ; Set if kernel access and non-zero VSID (copyin or copyout)
2011 mfsprg r11,0 ; Get the per_proc area
2012 beq- cr0,realFault ; Can not autogen for copyin/copyout...
2013 ori r10,r10,LOW_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the bottom part
2014 beq- cr1,bmInKernel ; We are in kernel... (cr1 set way back at entry)
2015
2016 lwz r10,PP_USERPMAP(r11) ; Get the user pmap
2017 la r10,PMAP_BMAPS(r10) ; Point to the chain anchor
2018 b bmInKernel ; Jump over alignment gap...
2019 nop
2020 nop
2021 nop
2022 nop
2023 nop
2024 nop
2025 bmInKernel:
2026 #ifndef CHIP_ERRATA_MAX_V1
2027 lwarx r9,0,r10
2028 #endif /* CHIP_ERRATA_MAX_V1 */
2029
2030 bmapLck: lwarx r9,0,r10 ; Get the block map anchor and lock
2031 rlwinm. r5,r9,0,31,31 ; Is it locked?
2032 ori r5,r5,1 ; Set the lock
2033 bne- bmapLckw ; Yeah...
2034 stwcx. r5,0,r10 ; Lock the bmap list
2035 bne- bmapLck ; Someone else was trying, try again...
2036 b bmapSXg ; All done...
2037
2038 .align 4
2039
2040 bmapLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
2041 beq+ bmapLck ; Not no more...
2042 lwz r9,0(r10) ; Get lock word again...
2043 b bmapLckw ; Check it out...
2044
2045 .align 5
2046
2047 nop ; Force ISYNC to last instruction in IFETCH
2048 nop
2049 nop
2050
2051 bmapSXg: rlwinm. r4,r9,0,0,26 ; Clear out flags and lock
2052 isync ; Make sure we have not used anything yet
2053 bne+ findAuto ; We have something, let us go...
2054
2055 bmapNone: stw r9,0(r10) ; Unlock it, we have nothing here
2056 ; No sync here because we have not changed anything
2057
2058 /*
2059 * When we come here, we know that we can't handle this. Restore whatever
2060 * state that we trashed and go back to continue handling the interrupt.
2061 */
2062
2063 realFault: li r10,0 /* Get lock clear value */
2064 lwz r3,saveexception(r13) /* Figure out the exception code again */
2065 stw r10,PCAlock(r8) /* Clear the PTEG lock */
2066 #if PERFTIMES && DEBUG
2067 mflr r11
2068 mr r4,r3
2069 li r3,33
2070 bl EXT(dbgLog2) ; Cut a performance trace entry
2071 mr r3,r4
2072 mtlr r11
2073 #endif
2074 blr /* Blow back and handle exception */
2075
2076 .align 5
2077
2078 findAuto: mr. r4,r4 ; Is there more?
2079 beq- bmapNone ; No more...
2080 lwz r5,bmstart(r4) ; Get the bottom of range
2081 lwz r11,bmend(r4) ; Get the top of range
2082 cmplw cr0,r6,r5 ; Are we before the entry?
2083 cmplw cr1,r6,r11 ; Are we after the entry?
2084 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
2085 bne+ cr1,faGot ; Found it...
2086
2087 lwz r4,bmnext(r4) ; Get the next one
2088 b findAuto ; Check it out...
2089
2090 faGot:
2091 lwz r7,blkFlags(r4) ; Get the flags
2092 rlwinm. r7,r7,0,blkRembit,blkRembit ; is this mapping partially removed
2093 bne bmapNone ; Pending remove, bail out
2094 rlwinm r6,r6,0,0,19 ; Round to page
2095 lwz r2,bmPTEr(r4) ; Get the real part of the PTE
2096 sub r5,r6,r5 ; Get offset into area
2097 stw r9,0(r10) ; Unlock it, we are done with it (no sync needed)
2098 add r2,r2,r5 ; Adjust the real address
2099
2100 lis r4,0x8080 /* Indicate that this was autogened */
2101 li r0,0x0180 /* Autogenned areas always set RC bits.
2102 This keeps the hardware from having
2103 to do two storage writes */
2104
2105 /*
2106 * Here where we insert the PTE into the hash. The PTE image is in R3, R2.
2107 * The PTEG allocation controls are a bit map of the state of the PTEG. The
2108 * PCAlock bits are a temporary lock for the specified PTE. PCAfree indicates that
2109 * the PTE slot is empty. PCAauto means that it comes from an autogen area. These
2110 * guys do not keep track of reference and change and are actually "wired".
2111 * They're easy to maintain. PCAsteal
2112 * is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
2113 * fields fit in a single word and are loaded and stored under control of the
2114 * PTEG control area lock (PCAlock).
2115 *
2116 * Note that PCAauto does not contribute to the steal calculations at all. Originally
2117 * it did, autogens were second in priority. This can result in a pathological
2118 * case where an instruction cannot make forward progress, or one PTE slot
2119 * thrashes.
2120 *
2121 * Physically, the fields are arranged:
2122 * 0: PCAfree
2123 * 1: PCAauto
2124 * 2: PCAlock
2125 * 3: PCAsteal
2126 */
2127
2128 insert: lwz r10,PCAallo(r8) /* Get the PTEG controls */
2129 eqv r6,r6,r6 /* Get all ones */
2130 mr r11,r10 /* Make a copy */
2131 rlwimi r6,r10,8,16,23 /* Insert sliding steal position */
2132 rlwimi r11,r11,24,24,31 /* Duplicate the locked field */
2133 addi r6,r6,-256 /* Form mask */
2134 rlwimi r11,r11,16,0,15 /* This gives us a quadrupled lock mask */
2135 rlwinm r5,r10,31,24,0 /* Slide over the mask for next time */
2136 mr r9,r10 /* Make a copy to test */
2137 not r11,r11 /* Invert the quadrupled lock */
2138 or r2,r2,r0 /* Force on R, and maybe C bit */
2139 and r9,r9,r11 /* Remove the locked guys */
2140 rlwimi r5,r5,8,24,24 /* Wrap bottom bit to top in mask */
2141 rlwimi r9,r11,0,16,31 /* Put two copies of the unlocked entries at the end */
2142 rlwinm r6,r6,0,16,7 ; Remove the autogens from the priority calculations
2143 rlwimi r10,r5,0,24,31 /* Move steal map back in */
2144 and r9,r9,r6 /* Set the starting point for stealing */
2145
2146 /* So, now we have in R9:
2147 byte 0 = ~locked & free
2148 byte 1 = 0
2149 byte 2 = ~locked & (PCAsteal - 1)
2150 byte 3 = ~locked
2151
2152 Each bit position represents (modulo 8) a PTE. If it is 1, it is available for
2153 allocation at its priority level, left to right.
2154
2155 Additionally, the PCA steal field in R10 has been rotated right one bit.
2156 */
2157
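/*
 * Sketch (assumed names): the mask gymnastics above boil down to a
 * three-level priority pick, where free, auto, lock, and steal are the four
 * bytes of PCAallo and each bit names one of the eight PTE slots. cntlzw()
 * stands in for the count-leading-zeros instruction:
 *
 *	avail = ((free & ~lock) << 24)			// 1st: free and unlocked
 *	      | (((steal - 1) & ~lock) << 8)		// 2nd: the sliding steal window
 *	      | (~lock & 0xFF);				// 3rd: any unlocked slot
 *	slot = cntlzw(avail);				// leftmost set bit wins
 *	if (slot == 32) bail();				// PTEG completely locked up
 *	priority = slot >> 3;	slot &= 7;		// 0 = was free, 2 or 3 = stolen
 *
 * Autogens get no priority level of their own; they are stolen like any
 * other live slot.
 */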
2158
2159 rlwinm r21,r10,8,0,7 ; Isolate just the old autogen bits
2160 cntlzw r6,r9 /* Allocate a slot */
2161 mr r14,r12 /* Save our mapping for later */
2162 cmplwi r6,32 ; Was there anything available?
2163 rlwinm r7,r6,29,30,31 /* Get the priority slot we got this from */
2164 rlwinm r6,r6,0,29,31 ; Isolate bit position
2165 srw r11,r4,r6 /* Position the PTEG control bits */
2166 slw r21,r21,r6 ; Move corresponding old autogen flag to bit 0
2167 mr r22,r11 ; Get another copy of the selected slot
2168
2169 beq- realFault /* Arghh, no slots! Take the long way 'round... */
2170
2171 /* Remember, we've already set up the mask pattern
2172 depending upon how we got here:
2173 if we got here from a simple mapping, R4=0x80000000;
2174 if we got here from an autogen, it is 0x80800000. */
2175
2176 rlwinm r6,r6,3,26,28 /* Start calculating actual PTE address */
2177 rlwimi r22,r22,24,8,15 ; Duplicate selected slot in second byte
2178 rlwinm. r11,r11,0,8,15 /* Isolate just the auto bit (remember about it too) */
2179 andc r10,r10,r22 /* Turn off the free and auto bits */
2180 add r6,r8,r6 /* Get position into PTEG control area */
2181 cmplwi cr1,r7,1 /* Set the condition based upon the old PTE type */
2182 sub r6,r6,r1 /* Switch it to the hash table */
2183 or r10,r10,r11 /* Turn auto on if it is (PTEG control all set up now) */
2184 subi r6,r6,1 /* Point right */
2185 stw r10,PCAallo(r8) /* Allocate our slot */
2186 dcbt br0,r6 ; Touch in the PTE
2187 bne wasauto /* This was autogenned... */
2188
2189 stw r6,mmPTEent(r14) /* Link the mapping to the PTE slot */
2190
2191 /*
2192 * So, now we're here and what exactly do we have? We've got:
2193 * 1) a full PTE entry, both top and bottom words in R3 and R2
2194 * 2) an allocated slot in the PTEG.
2195 * 3) R8 still points to the PTEG Control Area (PCA)
2196 * 4) R6 points to the PTE entry.
2197 * 5) R1 contains the length of the hash table - 1. We use this to back-translate
2198 * a PTE to a virtual address so we can invalidate TLBs.
2199 * 6) R11 has a copy of the PCA controls we set.
2200 * 7a) R7 indicates what the PTE slot was before we got to it. 0 shows
2201 * that it was empty and 2 or 3 show that
2202 * we've stolen a live one. CR1 is set to LT for empty and GT
2203 * otherwise.
2204 * 7b) Bit 0 of R21 is 1 if the stolen PTE was autogenned
2205 * 8) So far as our selected PTE, it should be valid if it was stolen
2206 * and invalid if not. We could put some kind of assert here to
2207 * check, but I think that I'd rather leave it in as a mysterious,
2208 * non-reproducible bug.
2209 * 9) The new PTE's mapping has been moved to the front of its PTEG hash list
2210 * so that it's kept in some semblance of a MRU list.
2211 * 10) R14 points to the mapping we're adding.
2212 *
2213 * So, what do we have to do yet?
2214 * 1) If we stole a slot, we need to invalidate the PTE completely.
2215 * 2) If we stole one AND it was not an autogen,
2216 * copy the entire old PTE (including R and C bits) to its mapping.
2217 * 3) Set the new PTE in the PTEG and make sure it is valid.
2218 * 4) Unlock the PTEG control area.
2219 * 5) Go back to the interrupt handler, changing the interrupt
2220 * code to "in vain" which will restore the registers and bail out.
2221 *
2222 */
2223 wasauto: oris r3,r3,0x8000 /* Turn on the valid bit */
2224 blt+ cr1,slamit /* It was empty, go slam it on in... */
2225
2226 lwz r10,0(r6) /* Grab the top part of the PTE */
2227 rlwinm r12,r6,6,4,19 /* Match up the hash to a page boundary */
2228 rlwinm r5,r10,5,4,19 /* Extract the VSID to a page boundary */
2229 rlwinm r10,r10,0,1,31 /* Make it invalid */
2230 xor r12,r5,r12 /* Calculate vaddr */
2231 stw r10,0(r6) /* Invalidate the PTE */
2232 rlwinm r5,r10,7,27,29 ; Move nybble 0 up to subhash position
2233 rlwimi r12,r10,1,0,3 /* Move in the segment portion */
2234 lis r9,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
2235 xor r5,r5,r10 ; Splooch nybble 0 and 1
2236 rlwimi r12,r10,22,4,9 /* Move in the API */
2237 ori r9,r9,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
2238 rlwinm r4,r10,27,27,29 ; Get low 3 bits of the VSID for look-aside hash
2239
2240 sync /* Make sure the invalid is stored */
2241
2242 xor r4,r4,r5 ; Finish splooching nybble 0, 1, and the low bits of the VSID
2243
2244 tlbhang: lwarx r5,0,r9 /* Get the TLBIE lock */
2245
2246 rlwinm r4,r4,0,27,29 ; Clean up splooched hash value
2247
2248 mr. r5,r5 /* Is it locked? */
2249 add r4,r4,r8 /* Point to the offset into the PCA area */
2250 li r5,1 /* Get our lock word */
2251 bne- tlbhang /* It's locked, go wait... */
2252
2253 la r4,PCAhash(r4) /* Point to the start of the hash chain for the PTE we're replacing */
2254
2255 stwcx. r5,0,r9 /* Try to get it */
2256 bne- tlbhang /* We was beat... */
2257
2258 mfspr r7,pvr /* Find out what kind of machine we are */
2259 li r5,0 /* Lock clear value */
2260 rlwinm r7,r7,16,16,31 /* Isolate CPU type */
2261
2262 tlbie r12 /* Invalidate it everywhere */
2263
2264 cmplwi r7,3 /* Is this a 603? */
2265 stw r5,0(r9) /* Clear the lock */
2266
2267 beq- its603 /* It's a 603, skip the tlbsync... */
2268
2269 eieio /* Make sure that the tlbie happens first */
2270 tlbsync /* wait for everyone to catch up */
2271 isync
2272
2273 its603: rlwinm. r21,r21,0,0,0 ; See if we just stole an autogenned entry
2274 sync /* Make sure of it all */
2275
2276 bne slamit ; The old was an autogen, time to slam the new in...
2277
2278 lwz r9,4(r6) /* Get the real portion of old PTE */
2279 lwz r7,0(r4) /* Get the first element. We can't get to here
2280 if we aren't working with a mapping... */
2281 mr r0,r7 ; Save pointer to first element
2282
2283 findold: mr r1,r11 ; Save the previous guy
2284 mr. r11,r7 /* Copy and test the chain */
2285 beq- bebad /* Assume it's not zero... */
2286
2287 lwz r5,mmPTEv(r11) /* See if this is the old active one */
2288 cmplw cr2,r11,r14 /* Check if this is actually the new one */
2289 cmplw r5,r10 /* Is this us? (Note: valid bit kept off in mappings) */
2290 lwz r7,mmhashnext(r11) /* Get the next one in line */
2291 beq- cr2,findold /* Don't count the new one... */
2292 cmplw cr2,r11,r0 ; Check if we are first on the list
2293 bne+ findold /* Not it (and assume the worst)... */
2294
2295 lwz r12,mmphysent(r11) /* Get the pointer to the physical entry */
2296 beq- cr2,nomove ; We are first, no need to requeue...
2297
2298 stw r11,0(r4) ; Chain us to the head
2299 stw r0,mmhashnext(r11) ; Chain the old head to us
2300 stw r7,mmhashnext(r1) ; Unlink us
2301
2302 nomove: li r5,0 /* Clear this on out */
2303
2304 mr. r12,r12 /* Is there a physical entry? */
2305 stw r5,mmPTEent(r11) ; Clear the PTE entry pointer
2306 li r5,pepte1 /* Point to the PTE last half */
2307 stw r9,mmPTEr(r11) ; Squirrel away the whole thing (RC bits are in here)
2308
2309 beq- mrgmrcx ; No physical entry for this one...
2310
2311 rlwinm r11,r9,0,23,24 /* Keep only the RC bits */
2312
2313 mrgmrcx: lwarx r9,r5,r12 /* Get the master copy */
2314 or r9,r9,r11 /* Merge in latest RC */
2315 stwcx. r9,r5,r12 /* Save it back */
2316 bne- mrgmrcx /* If it changed, try again... */
2317
2318 /*
2319 * Here's where we finish up. We save the real part of the PTE, eieio it, to make sure it's
2320 * out there before the top half (with the valid bit set).
2321 */
2322
2323 slamit: stw r2,4(r6) /* Stash the real part */
2324 li r4,0 /* Get a lock clear value */
2325 eieio /* Erect a barricade */
2326 stw r3,0(r6) /* Stash the virtual part and set valid on */
2327
2328 stw r4,PCAlock(r8) /* Clear the PCA lock */
2329
2330 li r3,T_IN_VAIN /* Say that we handled it */
2331 sync /* Go no further until the stores complete */
2332 #if PERFTIMES && DEBUG
2333 mflr r11
2334 mr r4,r3
2335 li r3,33
2336 bl EXT(dbgLog2) ; Cut a performance trace entry
2337 mr r3,r4
2338 mtlr r11
2339 #endif
2340 blr /* Back to the fold... */
2341
2342 bebad: lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
2343 ori r0,r0,LOW_ADDR(Choke)
2344 sc /* Firmware Heimlich maneuver */
2345
2346 /*
2347 * This walks the hash table or DBATs to locate the physical address of a virtual one.
2348 * The space is provided. If it is the kernel space, the DBATs are searched first. Failing
2349 * that, the hash table is accessed. Zero is returned for failure, so it must be special cased.
2350 * This is usually used for debugging, so we try not to rely
2351 * on anything that we don't have to.
2352 */
2353
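/*
 * Sketch (assumed names) of one DBAT probe, repeated for dbat0-dbat3 below.
 * In the upper BAT word, bits 0-14 are the BEPI, bits 19-29 the block length
 * mask (BL), and bit 30 the supervisor-valid bit:
 *
 *	if (batu & 2) {					// valid for supervisor state
 *		unsigned int base = batu & 0xFFFE0000;	// BEPI
 *		unsigned int span = ((batu & 0x1FFC) << 15) | 0x1FFFF;	// area length - 1
 *		if (va - base <= span)
 *			return (batl & 0xFFFE0000) + (va - base);	// BRPN + offset
 *	}
 */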
2354 ENTRY(LRA, TAG_NO_FRAME_USED)
2355
2356 mfsprg r8,2 ; Get feature flags
2357 mfmsr r10 /* Save the current MSR */
2358 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2359 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2360 mtcrf 0x04,r8 ; Set the features
2361 xoris r5,r3,HIGH_ADDR(PPC_SID_KERNEL) /* Clear the top half if equal */
2362 andi. r9,r10,0x7FCF /* Turn off interrupts and translation */
2363 eqv r12,r12,r12 /* Fill the bottom with foxes */
2364
2365 bt pfNoMSRirb,lraNoMSR ; No MSR...
2366
2367 mtmsr r9 ; Translation and all off
2368 isync ; Toss prefetch
2369 b lraNoMSRx
2370
2371 lraNoMSR:
2372 mr r7,r3
2373 li r0,loadMSR ; Get the MSR setter SC
2374 mr r3,r9 ; Get new MSR
2375 sc ; Set it
2376 mr r3,r7
2377 lraNoMSRx:
2378
2379 cmplwi r5,LOW_ADDR(PPC_SID_KERNEL) /* See if this is kernel space */
2380 rlwinm r11,r3,6,6,25 /* Position the space for the VSID */
2381 isync /* Purge pipe */
2382 bne- notkernsp /* This is not for the kernel... */
2383
2384 mfspr r5,dbat0u /* Get the virtual address and length */
2385 eqv r8,r8,r8 /* Get all foxes */
2386 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2387 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2388 beq- ckbat1 /* not valid, skip this one... */
2389 sub r7,r4,r7 /* Subtract out the base */
2390 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2391 mfspr r6,dbat0l /* Get the real part */
2392 cmplw r7,r8 /* Check if it is in the range */
2393 bng+ fndbat /* Yup, she's a good un... */
2394
2395 ckbat1: mfspr r5,dbat1u /* Get the virtual address and length */
2396 eqv r8,r8,r8 /* Get all foxes */
2397 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2398 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2399 beq- ckbat2 /* not valid, skip this one... */
2400 sub r7,r4,r7 /* Subtract out the base */
2401 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2402 mfspr r6,dbat1l /* Get the real part */
2403 cmplw r7,r8 /* Check if it is in the range */
2404 bng+ fndbat /* Yup, she's a good un... */
2405
2406 ckbat2: mfspr r5,dbat2u /* Get the virtual address and length */
2407 eqv r8,r8,r8 /* Get all foxes */
2408 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2409 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2410 beq- ckbat3 /* not valid, skip this one... */
2411 sub r7,r4,r7 /* Subtract out the base */
2412 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2413 mfspr r6,dbat2l /* Get the real part */
2414 cmplw r7,r8 /* Check if it is in the range */
2415 bng- fndbat /* Yup, she's a good un... */
2416
2417 ckbat3: mfspr r5,dbat3u /* Get the virtual address and length */
2418 eqv r8,r8,r8 /* Get all foxes */
2419 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2420 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2421 beq- notkernsp /* not valid, skip this one... */
2422 sub r7,r4,r7 /* Subtract out the base */
2423 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2424 mfspr r6,dbat3l /* Get the real part */
2425 cmplw r7,r8 /* Check if it is in the range */
2426 bgt+ notkernsp /* No good... */
2427
2428 fndbat: rlwinm r6,r6,0,0,14 /* Clean up the real address */
2429 mtmsr r10 /* Restore state */
2430 add r3,r7,r6 /* Relocate the offset to real */
2431 isync /* Purge pipe */
2432 blr /* Bye, bye... */
2433
2434 notkernsp: mfspr r5,sdr1 /* Get hash table base and size */
2435 rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */
2436 rlwimi r12,r5,16,0,15 /* Make table size -1 out of mask */
2437 rlwinm r7,r4,26,10,25 /* Isolate the page index */
2438 andc r5,r5,r12 /* Clean up the hash table */
2439 xor r7,r7,r11 /* Get primary hash */
2440 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
2441 and r7,r7,r12 /* Wrap the hash */
2442 rlwimi r11,r4,10,26,31 /* Move API into pte ID */
2443 add r5,r7,r5 /* Point to the PTEG */
2444 oris r11,r11,0x8000 /* Slam on valid bit so's we don't match an invalid one */
2445
2446 li r9,8 /* Get the number of PTEs to check */
2447 lwz r6,0(r5) /* Preload the virtual half */
2448
2449 fndpte: subi r9,r9,1 /* Count the pte */
2450 lwz r3,4(r5) /* Get the real half */
2451 cmplw cr1,r6,r11 /* Is this what we want? */
2452 lwz r6,8(r5) /* Start to get the next virtual half */
2453 mr. r9,r9 /* Any more to try? */
2454 addi r5,r5,8 /* Bump to next slot */
2455 beq cr1,gotxlate /* We found what we were looking for... */
2456 bne+ fndpte /* Go try the next PTE... */
2457
2458 mtmsr r10 /* Restore state */
2459 li r3,0 /* Show failure */
2460 isync /* Purge pipe */
2461 blr /* Leave... */
2462
2463 gotxlate: mtmsr r10 /* Restore state */
2464 rlwimi r3,r4,0,20,31 /* Cram in the page displacement */
2465 isync /* Purge pipe */
2466 blr /* Return... */
2467
2468
2469
2470 /*
2471 * struct blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr)
2472 *
2473 * This is used to add a block mapping entry to the MRU list whose top
2474 * node is anchored at bmaps. This is a real address and is also used as
2475 * the lock.
2476 *
2477 * Overlapping areas are not allowed. If we find one, we return its address and
2478 * expect the upper layers to panic. We only check this for a debug build...
2479 *
2480 */
2481
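/*
 * Sketch (assumed names) of the overlap check run on the chain below. Two
 * inclusive ranges collide exactly when either one's end falls inside the
 * other, which is what the cror logic computes:
 *
 *	for (bm = chain; bm != NULL; bm = bm->bmnext) {
 *		int newEndInOld = (newEnd >= bm->bmstart) && (newEnd <= bm->bmend);
 *		int oldEndInNew = (bm->bmend >= newStart) && (bm->bmend <= newEnd);
 *		if (newEndInOld || oldEndInNew)
 *			return bm;		// overlap: hand it back so the caller can panic
 *	}
 *	// no overlap: chain the new block in at the head
 */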
2482 .align 5
2483 .globl EXT(hw_add_blk)
2484
2485 LEXT(hw_add_blk)
2486
2487 mfsprg r9,2 ; Get feature flags
2488 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
2489 mfmsr r0 /* Save the MSR */
2490 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2491 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2492 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
2493 mtcrf 0x04,r9 ; Set the features
2494 xor r3,r3,r6 ; Get real address of bmap anchor
2495 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
2496 la r3,PMAP_BMAPS(r3) ; Point to bmap header
2497
2498 bt pfNoMSRirb,habNoMSR ; No MSR...
2499
2500 mtmsr r12 ; Translation and all off
2501 isync ; Toss prefetch
2502 b habNoMSRx
2503
2504 habNoMSR:
2505 mr r9,r0
2506 mr r8,r3
2507 li r0,loadMSR ; Get the MSR setter SC
2508 mr r3,r12 ; Get new MSR
2509 sc ; Set it
2510 mr r3,r8
2511 mr r0,r9
2512 habNoMSRx:
2513
2514 abLck: lwarx r9,0,r3 ; Get the block map anchor and lock
2515 rlwinm. r8,r9,0,31,31 ; Is it locked?
2516 ori r8,r9,1 ; Set the lock
2517 bne- abLckw ; Yeah...
2518 stwcx. r8,0,r3 ; Lock the bmap list
2519 bne- abLck ; Someone else was trying, try again...
2520 b abSXg ; All done...
2521
2522 .align 4
2523
2524 abLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
2525 beq+ abLck ; Not no more...
2526 lwz r9,0(r3) ; Get lock word again...
2527 b abLckw ; Check it out...
2528
2529 .align 5
2530
2531 nop ; Force ISYNC to last instruction in IFETCH
2532 nop
2533
2534 abSXg: rlwinm r11,r9,0,0,26 ; Clear out flags and lock
2535 isync ; Make sure we have not used anything yet
2536
2537 ;
2538 ;
2539 ;
2540
2541 lwz r7,bmstart(r4) ; Get start
2542 lwz r8,bmend(r4) ; Get end
2543 mr r2,r11 ; Get chain
2544
2545 abChk: mr. r10,r2 ; End of chain?
2546 beq abChkD ; Yes, chain is ok...
2547 lwz r5,bmstart(r10) ; Get start of current area
2548 lwz r6,bmend(r10) ; Get end of current area
2549
2550 cmplw cr0,r8,r5 ; Is the end of the new before the old?
2551 cmplw cr1,r8,r6 ; Is the end of the new after the old?
2552 cmplw cr6,r6,r7 ; Is the end of the old before the new?
2553 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in old
2554 cmplw cr7,r6,r8 ; Is the end of the old after the new?
2555 lwz r2,bmnext(r10) ; Get pointer to the next
2556 cror cr6_eq,cr6_lt,cr7_gt ; Set cr6_eq if old not in new
2557 crand cr1_eq,cr1_eq,cr6_eq ; Set cr1_eq if no overlap
2558 beq+ cr1,abChk ; Ok check the next...
2559
2560 lwz r8,blkFlags(r10) ; Get the flags
2561 rlwinm. r8,r8,0,blkRembit,blkRembit ; Check the blkRem bit
2562 beq abRet ; Not partially removed, just pass back the overlap
2563 ori r10,r10,2 ; Indicate that this block is partially removed
2564 abRet:
2565 stw r9,0(r3) ; Unlock
2566 mtmsr r0 ; Restore xlation and rupts
2567 mr r3,r10 ; Pass back the overlap
2568 isync ;
2569 blr ; Return...
2570
2571 abChkD: stw r11,bmnext(r4) ; Chain this on in
2572 rlwimi r4,r9,0,27,31 ; Copy in locks and flags
2573 sync ; Make sure that is done
2574
2575 stw r4,0(r3) ; Unlock and chain the new first one
2576 mtmsr r0 ; Restore xlation and rupts
2577 li r3,0 ; Pass back a no failure return code
2578 isync
2579 blr ; Return...
2580
2581
2582 /*
2583 * struct blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2584 *
2585 * This is used to remove a block mapping entry from the list that
2586 * is anchored at bmaps. This is a real address and is also used as
2587 * the lock.
2588 *
2589 * Note that this function clears a single block that contains
2590 * any address within the range sva to eva (inclusive). To entirely
2591 * clear any range, hw_rem_blk must be called repeatedly until it
2592 * returns a 0.
2593 *
2594 * The block is removed from the list and all hash table entries
2595 * corresponding to the mapped block are invalidated and the TLB
2596 * entries are purged. If the block is large, this could take
2597 * quite a while. We need to hash every possible address in the
2598 * range and lock down the PCA.
2599 *
2600 * If we attempt to remove a permanent entry, we will not do it.
2601 * The block address will be ORed with 1 and returned.
2602 *
2603 *
2604 */
2605
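/*
 * Usage sketch (an assumed caller, not a routine in this file): since a
 * large block is torn down at most BLKREMMAX pages per call, callers loop on
 * the returned status until the range comes back empty:
 *
 *	for (;;) {
 *		blm = hw_rem_blk(pmap, sva, eva);
 *		if (blm == 0)
 *			break;				// nothing left in the range
 *		if ((unsigned int)blm & 1)
 *			panic("permanent block");	// refused to remove it
 *		// bit 1 set means a partial tear down is still in progress;
 *		// either way, just call again until 0 comes back
 *	}
 */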
2606 .align 5
2607 .globl EXT(hw_rem_blk)
2608
2609 LEXT(hw_rem_blk)
2610
2611 mfsprg r9,2 ; Get feature flags
2612 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
2613 mfmsr r0 /* Save the MSR */
2614 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2615 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2616 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
2617 mtcrf 0x04,r9 ; Set the features
2618 xor r3,r3,r6 ; Get real address of bmap anchor
2619 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
2620 la r3,PMAP_BMAPS(r3) ; Point to the bmap chain head
2621
2622 bt pfNoMSRirb,hrbNoMSR ; No MSR...
2623
2624 mtmsr r12 ; Translation and all off
2625 isync ; Toss prefetch
2626 b hrbNoMSRx
2627
2628 hrbNoMSR:
2629 mr r9,r0
2630 mr r8,r3
2631 li r0,loadMSR ; Get the MSR setter SC
2632 mr r3,r12 ; Get new MSR
2633 sc ; Set it
2634 mr r3,r8
2635 mr r0,r9
2636 hrbNoMSRx:
2637 li r7,0
2638 cmp cr5,r0,r7 ; Request to invalidate the PTEs
2639 b rbLck
2640
2641 rbunlink:
2642 lwz r4,bmstart(r10) ; Get start of current mapping
2643 lwz r5,bmend(r10) ; Get end of current mapping
2644 cmp cr5,r3,r3 ; Request to unlink the mapping
2645
2646 rbLck: lwarx r9,0,r3 ; Get the block map anchor and lock
2647 rlwinm. r8,r9,0,31,31 ; Is it locked?
2648 ori r8,r9,1 ; Set the lock
2649 bne- rbLckw ; Yeah...
2650 stwcx. r8,0,r3 ; Lock the bmap list
2651 bne- rbLck ; Someone else was trying, try again...
2652 b rbSXg ; All done...
2653
2654 .align 4
2655
2656 rbLckw: rlwinm. r11,r9,0,31,31 ; Check if it is still held
2657 beq+ rbLck ; Not no more...
2658 lwz r9,0(r3) ; Get lock word again...
2659 b rbLckw ; Check it out...
2660
2661 .align 5
2662
2663 nop ; Force ISYNC to last instruction in IFETCH
2664 nop
2665
2666 rbSXg: rlwinm. r2,r9,0,0,26 ; Clear out flags and lock
2667 mr r10,r3 ; Keep anchor as previous pointer
2668 isync ; Make sure we have not used anything yet
2669
2670 beq- rbMT ; There is nothing in the list
2671
2672 rbChk: mr r12,r10 ; Save the previous
2673 mr. r10,r2 ; End of chain?
2674 beq rbMT ; Yes, nothing to do...
2675 lwz r11,bmstart(r10) ; Get start of current area
2676 lwz r6,bmend(r10) ; Get end of current area
2677
2678 cmplw cr0,r5,r11 ; Is the end of range before the start of the area?
2679 cmplw cr1,r4,r6 ; Is the start of range after the end of the area?
2680 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
2681 lwz r2,bmnext(r10) ; Get the next one
2682 beq+ cr1,rbChk ; Not this one, check the next...
2683
2684 cmplw cr1,r12,r3 ; Is the current mapping the first one?
2685
2686 bne cr5,rbblkRem ; Do we have to unchain the mapping
2687
2688 bne cr1,rbnFirst ; Yes, is this the first mapping?
2689 rlwimi r9,r2,0,0,26 ; Yes, Change the lock value
2690 ori r2,r9,1 ; Turn on the lock bit
2691 rbnFirst:
2692 stw r2,bmnext(r12) ; Unchain us
2693 sync
2694 b rbDone
2695
2696 rbblkRem:
2697
2698 lwz r8,blkFlags(r10) ; Get the flags
2699
2700 rlwinm. r7,r8,0,blkPermbit,blkPermbit ; is this a permanent block?
2701
2702 bne- rbPerm ; This is permanent, do not remove...
2703
2704 rlwinm. r7,r8,0,blkRembit,blkRembit ; is this mapping partially removed
2705
2706 beq rbblkRemcont ; If not, check the max size
2707 lwz r11,bmcurrent(r10) ; If yes, resume for the current page
2708
2709 cmp cr5,r11,r6 ; No partial remove left
2710 beq cr5,rbpendret ; But there is a pending remove
2711
2712 rbblkRemcont:
2713 bne rbblkRemcont1 ; Not the first remove, blkRem is already set
2714
2715 oris r8,r8,hi16(blkRem) ; Yes
2716 stw r8,blkFlags(r10) ; set the blkRem bit in blkFlags
2717
2718 rbblkRemcont1:
2719 lis r5,hi16(BLKREMMAX*4096) ; Load maximum size tear down
2720 ori r5,r5,lo16(BLKREMMAX*4096) ; Load maximum size tear down
2721 sub r7,r6,r11 ; Get the remaining size to tear down
2722 cmp cr5,r7,r5 ; Compare against the maximum size
2723 ble cr5,rbfullblk ; If less or equal, go remove the mapping
2724
2725 add r7,r11,r5 ; Add the max size tear down to the current page
2726 stw r7,bmcurrent(r10) ; Update the current page
2727 subi r6,r7,1 ; Set the current end of the partial tear down
2728 b rbcont
2729
2730 rbfullblk:
2731 stw r6,bmcurrent(r10) ; Update the current page
2732
2733 rbcont:
2734 lwz r8,bmspace(r10) ; Get the VSID
2735 sync
2736 stw r9,0(r3) ; Unlock and chain the new first one
2737
2738 eqv r4,r4,r4 ; Fill the bottom with foxes
2739 mfspr r12,sdr1 ; Get hash table base and size
2740 rlwinm r8,r8,6,0,25 ; Align VSID to PTEG
2741 rlwimi r4,r12,16,0,15 ; Make table size - 1 out of mask
2742 andc r12,r12,r4 ; Clean up address of hash table
2743 rlwinm r5,r11,26,6,25 ; Rotate virtual start address into PTEG units
2744 add r12,r12,r4 ; Point to PCA - 1
2745 rlwinm r6,r6,26,6,25 ; Rotate virtual end address into PTEG units
2746 addi r12,r12,1 ; Point to PCA base
2747 sub r6,r6,r5 ; Get the total number of PTEGs to clear
2748 cmplw r6,r4 ; See if this wraps all the way around
2749 blt rbHash ; Nope, length is right
2750 subi r6,r4,32+31 ; Back down to correct length
2751
2752 rbHash: rlwinm r5,r5,0,10,25 ; Keep only the page index
2753 xor r2,r8,r5 ; Hash into table
2754 and r2,r2,r4 ; Wrap into the table
2755 add r2,r2,r12 ; Point right at the PCA
2756
2757 rbLcka: lwarx r7,0,r2 ; Get the PTEG lock
2758 mr. r7,r7 ; Is it locked?
2759 bne- rbLckwa ; Yeah...
2760 li r7,1 ; Get the locked value
2761 stwcx. r7,0,r2 ; Take it
2762 bne- rbLcka ; Someone else was trying, try again...
2763 b rbSXga ; All done...
2764
2765 rbLckwa: mr. r7,r7 ; Check if it is already held
2766 beq+ rbLcka ; It is clear...
2767 lwz r7,0(r2) ; Get lock word again...
2768 b rbLckwa ; Wait...
2769
2770 rbSXga: isync ; Make sure nothing used yet
2771 lwz r7,PCAallo(r2) ; Get the allocation word
2772 rlwinm. r11,r7,8,0,7 ; Isolate the autogenerated PTEs
2773 or r7,r7,r11 ; Release the autogen slots
2774 beq+ rbAintNone ; There are not any here
2775 mtcrf 0xC0,r11 ; Set the branch masks for autogens
2776 sub r11,r2,r4 ; Move back to the hash table + 1
2777 rlwinm r7,r7,0,16,7 ; Clear the autogen field
2778 subi r11,r11,1 ; Point to the PTEG
2779 stw r7,PCAallo(r2) ; Update the flags
2780 li r7,0 ; Get an invalid PTE value
2781
2782 bf 0,rbSlot1 ; No autogen here
2783 stw r7,0x00(r11) ; Invalidate PTE
2784 rbSlot1: bf 1,rbSlot2 ; No autogen here
2785 stw r7,0x08(r11) ; Invalidate PTE
2786 rbSlot2: bf 2,rbSlot3 ; No autogen here
2787 stw r7,0x10(r11) ; Invalidate PTE
2788 rbSlot3: bf 3,rbSlot4 ; No autogen here
2789 stw r7,0x18(r11) ; Invalidate PTE
2790 rbSlot4: bf 4,rbSlot5 ; No autogen here
2791 stw r7,0x20(r11) ; Invalidate PTE
2792 rbSlot5: bf 5,rbSlot6 ; No autogen here
2793 stw r7,0x28(r11) ; Invalidate PTE
2794 rbSlot6: bf 6,rbSlot7 ; No autogen here
2795 stw r7,0x30(r11) ; Invalidate PTE
2796 rbSlot7: bf 7,rbSlotx ; No autogen here
2797 stw r7,0x38(r11) ; Invalidate PTE
2798 rbSlotx:
2799
2800 rbAintNone: li r7,0 ; Clear this out
2801 sync ; To make SMP happy
2802 addic. r6,r6,-64 ; Decrement the count
2803 stw r7,PCAlock(r2) ; Release the PTEG lock
2804 addi r5,r5,64 ; Move up by adjusted page number
2805 bge+ rbHash ; Not done...
2806
2807 sync ; Make sure the memory is quiet
2808
2809 ;
2810 ; Here we take the easy way out and just purge the entire TLB. This is
2811 ; certainly faster and definitely easier than blasting just the correct ones
2812 ; in the range; we only need one lock and one TLBSYNC. We would hope
2813 ; that most blocks are more than 64 pages (256K) and on every machine
2814 ; up to Book E, 64 TLBIEs will invalidate the entire table.
2815 ;
2816
2817 li r5,64 ; Get number of TLB entries to purge
2818 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock
2819 li r6,0 ; Start at 0
2820 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part
2821
2822 rbTlbL: lwarx r2,0,r12 ; Get the TLBIE lock
2823 mr. r2,r2 ; Is it locked?
2824 li r2,1 ; Get our lock value
2825 bne- rbTlbL ; It is locked, go wait...
2826 stwcx. r2,0,r12 ; Try to get it
2827 bne- rbTlbL ; We was beat...
2828
2829 rbTlbN: addic. r5,r5,-1 ; See if we did them all
2830 tlbie r6 ; Invalidate it everywhere
2831 addi r6,r6,0x1000 ; Up to the next page
2832 bgt+ rbTlbN ; Make sure we have done it all...
2833
2834 mfspr r5,pvr ; Find out what kind of machine we are
2835 li r2,0 ; Lock clear value
2836
2837 rlwinm r5,r5,16,16,31 ; Isolate CPU type
2838 cmplwi r5,3 ; Is this a 603?
2839 sync ; Make sure all is quiet
2840 beq- rbits603a ; It is a 603, skip the tlbsync...
2841
2842 eieio ; Make sure that the tlbie happens first
2843 tlbsync ; wait for everyone to catch up
2844 isync
2845
2846 rbits603a: sync ; Wait for quiet again
2847 stw r2,0(r12) ; Unlock invalidates
2848
2849 sync ; Make sure that is done
2850
2851 ble cr5,rbunlink ; If all PTEs are flushed, go unlink the mapping
2852 mtmsr r0 ; Restore xlation and rupts
2853 mr r3,r10 ; Pass back the removed block in progress
2854 ori r3,r3,2 ; Indicate that the block remove isn't completed yet
2855 isync
2856 blr ; Return...
2857
2858 rbpendret:
2859 stw r9,0(r3) ; Unlock
2860 mtmsr r0 ; Restore xlation and rupts
2861 mr r3,r10 ; Pass back the removed block in progress
2862 ori r3,r3,2 ; Indicate that the block remove isn't completed yet
2863 isync
2864 blr ; Return...
2865
2866
2867 rbMT: stw r9,0(r3) ; Unlock
2868 mtmsr r0 ; Restore xlation and rupts
2869 li r3,0 ; Say we did not find one
2870 isync
2871 blr ; Return...
2872
2873 rbPerm: stw r9,0(r3) ; Unlock
2874 mtmsr r0 ; Restore xlation and rupts
2875 ori r3,r10,1 ; Say we did not remove it
2876 isync
2877 blr ; Return...
2878
2879 rbDone: stw r9,0(r3) ; Unlock
2880 mtmsr r0 ; Restore xlation and rupts
2881 mr r3,r10 ; Pass back the removed block
2882 isync
2883 blr ; Return...
2884
2885 /*
2886 * hw_select_mappings(struct mappingflush *mappingflush)
2887 *
2888 * Input: PCA addr
2889 * Output: up to 8 user mappings
2890 *
2891 * hw_select_mappings() scans every PCA mapping hash list and selects
2892 * the last user mapping if it exists.
2893 *
2894 */
2895
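/*
 * Sketch (assumed names) of the scan below: for each of the eight PCAhash
 * lists, remember the last mapping whose VSID is not a kernel one, and
 * append it (pmap plus reconstructed vaddr) to the caller's flush array.
 * IS_KERNEL_VSID() and RECONSTRUCT_VA() are hypothetical stand-ins:
 *
 *	mf->mappingcnt = 0;
 *	for (i = 0; i < 8; i++) {
 *		int found = 0;
 *		for (mp = pca->PCAhash[i]; mp != NULL; mp = mp->mmhashnext) {
 *			if (IS_KERNEL_VSID(mp->mmPTEv))
 *				continue;		// skip kernel VSIDs
 *			entry.pmap = mp->mmpmap;	// last user mapping on the list wins
 *			entry.va = RECONSTRUCT_VA(mp->mmPTEv, pca);
 *			found = 1;
 *		}
 *		if (found)
 *			mf->mapping[mf->mappingcnt++] = entry;
 *	}
 */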
2896 .align 5
2897 .globl EXT(hw_select_mappings)
2898
2899 LEXT(hw_select_mappings)
2900 mr r5,r3 ; Get the mapping flush addr
2901 mfmsr r12 ; Get the MSR
2902 rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2903 rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2904 mfsprg r9,2 ; Get feature flags
2905 andi. r0,r12,0x7FCF ; Disable translation and interruptions
2906 mtcrf 0x04,r9 ; Set the features
2907 bt pfNoMSRirb,hvmNoMSR ; No MSR...
2908 mtmsr r0
2909 isync
2910 b hvmNoMSRx
2911 hvmNoMSR:
2912 mr r3,r0 ; Get the new MSR
2913 li r0,loadMSR ; Get the MSR setter SC
2914 sc
2915 hvmNoMSRx:
2916 mr r0,r12
2917 li r11,1 ; Get the locked value
2918
2919 hvmptegLckx:
2920 lwz r3,MFpcaptr(r5) ; Get the PCA pointer
2921 lwarx r10,0,r3 ; Get the PTEG lock
2922 mr. r10,r10 ; Is it locked?
2923 bne- hvmptegLckwx ; Yeah...
2924 stwcx. r11,0,r3 ; Try to take it
2925 bne- hvmptegLckx ; Someone else was trying, try again...
2926 b hvmptegSXgx ; All done...
2927
2928 .align 4
2929
2930 hvmptegLckwx:
2931 mr. r10,r10 ; Check if it is already held
2932 beq+ hvmptegLckx ; It's clear...
2933 lwz r10,0(r3) ; Get lock word again...
2934 b hvmptegLckwx ; Wait...
2935
2936 .align 4
2937
2938 hvmptegSXgx:
2939 isync ; Make sure we haven't used anything yet
2940
2941 li r11,8 ; set count to 8
2942
2943 lwz r6,PCAhash(r3) ; load the first mapping hash list
2944 la r12,PCAhash(r3) ; Point to the mapping hash area
2945 la r4,MFmapping(r5) ; Point to the mapping flush mapping area
2946 li r7,0 ; Load zero
2947 stw r7,MFmappingcnt(r5) ; Set the current count to 0
2948 hvmnexthash:
2949 li r10,0 ; Mapping test
2950
2951 hvmfindmap:
2952 mr. r6,r6 ; Test if the hash list current pointer is zero
2953 beq hvmfindmapret ; Did we hit the end of the hash list
2954 lwz r7,mmPTEv(r6) ; Pick up our virtual ID
2955 rlwinm r8,r7,5,0,19 ; Pick up the lower 20 bits of the VSID
2956 mr. r8,r8 ; Is it a kernel VSID (low bits zero)?
2957 beq hvmfindmapnext ; Skip kernel VSIDs
2958 rlwinm r8,r7,1,0,3 ; Extract the segment index
2959 rlwinm r9,r7,22,4,9 ; Extract the upper 6 bits of the API
2960 or r8,r8,r9 ; Add to the virtual address
2961 rlwinm r9,r7,31,6,25 ; Pick up the lower 19 bits of the VSID
2962 xor r9,r9,r3 ; Exclusive OR with the PCA address
2963 rlwinm r9,r9,6,10,19 ; Extract the lower 10 bits of the API
2964 or r8,r8,r9 ; Add to the virtual address
2965
2966 stw r8,4(r4) ; Store the virtual address
2967 lwz r8,mmpmap(r6) ; Get the pmap
2968 stw r8,0(r4) ; Store the pmap
2969 li r10,1 ; Found one
2970
2971 hvmfindmapnext:
2972 lwz r6,mmhashnext(r6) ; Pick up next mapping block
2973 b hvmfindmap ; Scan the next mapping
2974 hvmfindmapret:
2975 mr. r10,r10 ; Did we find a mapping?
2976 beq hvmnexthashprep ; If not, do not update the mappingflush array
2977 lwz r7,MFmappingcnt(r5) ; Get the current count
2978 addi r7,r7,1 ; Increment the current count
2979 stw r7,MFmappingcnt(r5) ; Store the current count
2980 addi r4,r4,MFmappingSize ; Point to the next mapping flush entry
2981 hvmnexthashprep:
2982 addi r12,r12,4 ; Load the next hash list
2983 lwz r6,0(r12) ; Load the next hash list entry
2984 subi r11,r11,1 ; Decrement hash list index
2985 mr. r11,r11 ; Test for a remaining hash list
2986 bne hvmnexthash ; Loop to scan the next hash list
2987
2988 li r10,0
2989 stw r10,0(r3) ; Unlock the hash list
2990 mtmsr r0 ; Restore translation and interruptions
2991 isync
2992 blr
2993
2994 /*
2995 * vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va)
2996 *
2997 * This is used to translate a virtual address within a block mapping entry
2998 * to a physical address. If not found, 0 is returned.
2999 *
3000 */
3001
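/*
 * Sketch (assumed names): once the covering block is found, the answer is
 * simple offset arithmetic off the block's PTE image:
 *
 *	pa = (bm->bmPTEr & ~0xFFF) + (va - bm->bmstart);	// page from the PTE plus offset
 */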
3002 .align 5
3003 .globl EXT(hw_cvp_blk)
3004
3005 LEXT(hw_cvp_blk)
3006
3007 mfsprg r9,2 ; Get feature flags
3008 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
3009 mfmsr r0 /* Save the MSR */
3010 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3011 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3012 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
3013 mtcrf 0x04,r9 ; Set the features
3014 xor r3,r3,r6 ; Get real address of bmap anchor
3015 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
3016 la r3,PMAP_BMAPS(r3) ; Point to chain header
3017
3018 bt pfNoMSRirb,hcbNoMSR ; No MSR...
3019
3020 mtmsr r12 ; Translation and all off
3021 isync ; Toss prefetch
3022 b hcbNoMSRx
3023
3024 hcbNoMSR:
3025 mr r9,r0
3026 mr r8,r3
3027 li r0,loadMSR ; Get the MSR setter SC
3028 mr r3,r12 ; Get new MSR
3029 sc ; Set it
3030 mr r3,r8
3031 mr r0,r9
3032 hcbNoMSRx:
3033
3034 cbLck: lwarx r9,0,r3 ; Get the block map anchor and lock
3035 rlwinm. r8,r9,0,31,31 ; Is it locked?
3036 ori r8,r9,1 ; Set the lock
3037 bne- cbLckw ; Yeah...
3038 stwcx. r8,0,r3 ; Lock the bmap list
3039 bne- cbLck ; Someone else was trying, try again...
3040 b cbSXg ; All done...
3041
3042 .align 4
3043
3044 cbLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
3045 beq+ cbLck ; Not no more...
3046 lwz r9,0(r3) ; Get lock word again...
3047 b cbLckw ; Check it out...
3048
3049 .align 5
3050
3051 nop ; Force ISYNC to last instruction in IFETCH
3052 nop
3053 nop
3054 nop
3055 nop
3056
3057 cbSXg: rlwinm. r11,r9,0,0,26 ; Clear out flags and lock
3058 li r2,0 ; Assume we do not find anything
3059 isync ; Make sure we have not used anything yet
3060
3061 cbChk: mr. r11,r11 ; Is there more?
3062 beq- cbDone ; No more...
3063 lwz r5,bmstart(r11) ; Get the bottom of range
3064 lwz r12,bmend(r11) ; Get the top of range
3065 cmplw cr0,r4,r5 ; Are we before the entry?
3066 cmplw cr1,r4,r12 ; Are we after the entry?
3067 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
3068 beq- cr1,cbNo ; We are not in the range...
3069
3070 lwz r2,bmPTEr(r11) ; Get the real part of the PTE
3071 sub r5,r4,r5 ; Get offset into area
3072 rlwinm r2,r2,0,0,19 ; Clean out everything but the page
3073 add r2,r2,r5 ; Adjust the real address
3074
3075 cbDone: stw r9,0(r3) ; Unlock it, we are done with it (no sync needed)
3076 mtmsr r0 ; Restore translation and interrupts...
3077 isync ; Make sure it is on
3078 mr r3,r2 ; Set return physical address
3079 blr ; Leave...
3080
3081 .align 5
3082
3083 cbNo: lwz r11,bmnext(r11) ; Link next
3084 b cbChk ; Check it out...
3085
3086
3087 /*
3088 * hw_set_user_space(pmap)
3089 * hw_set_user_space_dis(pmap)
3090 *
3091 * Indicate whether memory space needs to be switched.
3092 * We really need to turn off interrupts here, because we need to be non-preemptable
3093 *
3094 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
3095 * register usage here. The VMM switch code in vmachmon.s that calls this
3096 * knows what registers are in use. Check that code if these change.
3097 */
3098
3099
3100
3101 .align 5
3102 .globl EXT(hw_set_user_space)
3103
3104 LEXT(hw_set_user_space)
3105
3106 mfmsr r10 /* Get the current MSR */
3107 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3108 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3109 rlwinm r9,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off 'rupts */
3110 mtmsr r9 /* Disable 'em */
3111 lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation
3112 lwz r4,PMAP_SPACE(r3) ; Get the space
3113 mfsprg r6,0 /* Get the per_proc_info address */
3114 xor r3,r3,r7 ; Get real address of bmap anchor
3115 stw r4,PP_USERSPACE(r6) /* Show our new address space */
3116 stw r3,PP_USERPMAP(r6) ; Show our real pmap address
3117 mtmsr r10 /* Restore interruptions */
3118 blr /* Return... */
3119
3120 .align 5
3121 .globl EXT(hw_set_user_space_dis)
3122
3123 LEXT(hw_set_user_space_dis)
3124
3125 lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation
3126 lwz r4,PMAP_SPACE(r3) ; Get the space
3127 mfsprg r6,0 ; Get the per_proc_info address
3128 xor r3,r3,r7 ; Get real address of bmap anchor
3129 stw r4,PP_USERSPACE(r6) ; Show our new address space
3130 stw r3,PP_USERPMAP(r6) ; Show our real pmap address
3131 blr ; Return...
3132
3133
3134 /* struct mapping *hw_cpv(struct mapping *mp) - Converts a physical mapping CB address to virtual
3135 *
3136 */
3137
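/*
 * Sketch (assumed names): the mapping block holding a mapping CB keeps an
 * XOR swap value (mbvrswap) relating its virtual and real addresses, so the
 * conversion in either direction is the same XOR. Reading mbvrswap through
 * a real address is why hw_cpv briefly turns data translation off:
 *
 *	blk = (struct mappingblok *)((unsigned int)mp & ~0xFFF);	// owning block, page aligned
 *	return (struct mapping *)(((unsigned int)mp ^ blk->mbvrswap) & ~0x1F);	// flip v<->r, drop flags
 */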
3138 .align 5
3139 .globl EXT(hw_cpv)
3140
3141 LEXT(hw_cpv)
3142
3143 rlwinm. r4,r3,0,0,19 ; Round back to the mapping block allocation control block
3144 mfmsr r10 ; Get the current MSR
3145 beq- hcpvret ; Skip if we are passed a 0...
3146 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3147 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3148 andi. r9,r10,0x7FEF ; Turn off interrupts and data translation
3149 mtmsr r9 ; Disable DR and EE
3150 isync
3151
3152 lwz r4,mbvrswap(r4) ; Get the conversion value
3153 mtmsr r10 ; Interrupts and DR back on
3154 isync
3155 xor r3,r3,r4 ; Convert to physical
3156
3157 hcpvret: rlwinm r3,r3,0,0,26 ; Clean out any flags
3158 blr
3159
3160
3161 /* struct mapping *hw_cvp(struct mapping *mp) - Converts a virtual mapping CB address to physical
3162 *
3163 * Translation must be on for this
3164 *
3165 */
3166
3167 .align 5
3168 .globl EXT(hw_cvp)
3169
3170 LEXT(hw_cvp)
3171
3172 rlwinm r4,r3,0,0,19 ; Round back to the mapping block allocation control block
3173 rlwinm r3,r3,0,0,26 ; Clean out any flags
3174 lwz r4,mbvrswap(r4) ; Get the conversion value
3175 xor r3,r3,r4 ; Convert to virtual
3176 blr
3177
3178
3179 /* int mapalc(struct mappingblok *mb) - Finds, allocates, and checks a free mapping entry in a block
3180 *
3181 * Lock must already be held on mapping block list
3182 * returns 0 if all slots filled.
3183 * returns n if a slot is found and it is not the last
3184 * returns -n if a slot is found and it is the last
3185 * when n and -n are returned, the corresponding bit is cleared
3186 *
3187 */
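
/*
 * Illustrative C equivalent of mapalc (a sketch under assumed names, not
 * the original). The free map is the four 32-bit words at mbfree, one bit
 * per slot in the block, scanned most-significant bit first; cntlzw is
 * modeled with __builtin_clz. Slot 0 holds the block header and is never
 * free, so a return of 0 is unambiguous.
 *
 *	int mapalc(struct mappingblok *mb) {
 *		int w;
 *		for (w = 0; w < 4; w++) {
 *			if (mb->mbfree[w] == 0) continue;		// no free slot in this word
 *			int n = __builtin_clz(mb->mbfree[w]);		// first free slot from the MSB
 *			mb->mbfree[w] &= ~(0x80000000u >> n);		// allocate it
 *			int idx = (w * 32) + n;				// slot number within the block
 *			if (mb->mbfree[0] | mb->mbfree[1] | mb->mbfree[2] | mb->mbfree[3])
 *				return idx;				// some slots remain
 *			return -idx;					// we just allocated the last one
 *		}
 *		return 0;						// all slots filled
 *	}
 */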
3188
3189 .align 5
3190 .globl EXT(mapalc)
3191
3192 LEXT(mapalc)
3193
3194 lwz r4,mbfree(r3) ; Get the first mask
3195 lis r0,0x8000 ; Get the mask to clear the first free bit
3196 lwz r5,mbfree+4(r3) ; Get the second mask
3197 mr r12,r3 ; Save the return
3198 cntlzw r8,r4 ; Get first free field
3199 lwz r6,mbfree+8(r3) ; Get the third mask
3200 srw. r9,r0,r8 ; Get bit corresponding to first free one
3201 lwz r7,mbfree+12(r3) ; Get the fourth mask
3202 cntlzw r10,r5 ; Get first free field in second word
3203 andc r4,r4,r9 ; Turn it off
3204 bne malcfnd0 ; Found one...
3205
3206 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
3207 cntlzw r11,r6 ; Get first free field in third word
3208 andc r5,r5,r9 ; Turn it off
3209 bne malcfnd1 ; Found one...
3210
3211 srw. r9,r0,r11 ; Get bit corresponding to first free one in third word
3212 cntlzw r10,r7 ; Get first free field in fourth word
3213 andc r6,r6,r9 ; Turn it off
3214 bne malcfnd2 ; Found one...
3215
3216 srw. r9,r0,r10 ; Get bit corresponding to first free one in fourth word
3217 li r3,0 ; Assume abject failure
3218 andc r7,r7,r9 ; Turn it off
3219 beqlr ; There are none left...
3220
3221 addi r3,r10,96 ; Set the correct bit number
3222 stw r7,mbfree+12(r12) ; Actually allocate the slot
3223
3224 mapafin: or r4,r4,r5 ; Merge the first two allocation maps
3225 or r6,r6,r7 ; Then the last two
3226 or. r4,r4,r6 ; Merge both halves
3227 bnelr+ ; Return if some left for next time...
3228
3229 neg r3,r3 ; Indicate we just allocated the last one
3230 blr ; Leave...
3231
3232 malcfnd0: stw r4,mbfree(r12) ; Actually allocate the slot
3233 mr r3,r8 ; Set the correct bit number
3234 b mapafin ; Exit now...
3235
3236 malcfnd1: stw r5,mbfree+4(r12) ; Actually allocate the slot
3237 addi r3,r10,32 ; Set the correct bit number
3238 b mapafin ; Exit now...
3239
3240 malcfnd2: stw r6,mbfree+8(r12) ; Actually allocate the slot
3241 addi r3,r11,64 ; Set the correct bit number
3242 b mapafin ; Exit now...
3243
3244
3245 /*
3246 * Log out all memory usage
3247 */
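
/*
 * Layout of the dump, reconstructed from the stores below (the field names
 * are assumptions). The image is written to physical memory starting at the
 * mem_size boundary, i.e., just past defined memory, and only if mem_actual
 * shows real memory extending beyond that point.
 *
 *	struct logmem_hdr {			// at physical address mem_size
 *		unsigned int defined_size;	// +0: mem_size
 *		unsigned int actual_size;	// +4: mem_actual
 *		unsigned int hash_base;		// +8: hash_table_base
 *		unsigned int hash_size;		// +12: hash_table_size
 *		unsigned int physent_ptr;	// +16: phys_ent table from pmap_mem_regions
 *		unsigned int mappings;		// +20: current mapping block from mapCtl
 *		unsigned int total_len;		// +24: total bytes saved
 *	};
 *
 * At header + 0x1000 follow the hash table and the PCA (hence the doubled
 * copy length), then one 8-byte phys_ent per page of defined memory, then,
 * page-aligned, the current mapping block.
 */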
3248
3249 .align 5
3250 .globl EXT(logmem)
3251
3252 LEXT(logmem)
3253
3254 mfmsr r2 ; Get the MSR
3255 lis r10,hi16(EXT(DebugWork)) ; High part of area
3256 rlwinm r2,r2,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3257 lis r12,hi16(EXT(mem_actual)) ; High part of actual
3258 rlwinm r2,r2,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3259 andi. r0,r2,0x7FCF ; Interrupts and translation off
3260 ori r10,r10,lo16(EXT(DebugWork)) ; Get the entry
3261 mtmsr r0 ; Turn stuff off
3262 ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual
3263 li r0,1 ; Get a one
3264
3265 isync
3266
3267 stw r0,4(r10) ; Force logging off
3268 lwz r0,0(r12) ; Get the end of memory
3269
3270 lis r12,hi16(EXT(mem_size)) ; High part of defined memory
3271 ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory
3272 lwz r12,0(r12) ; Make it end of defined
3273
3274 cmplw r0,r12 ; Is there room for the data?
3275 ble- logmemexit ; No, do not even try...
3276
3277 stw r12,0(r12) ; Set defined memory size
3278 stw r0,4(r12) ; Set the actual amount of memory
3279
3280 lis r3,hi16(EXT(hash_table_base)) ; Hash table address
3281 lis r4,hi16(EXT(hash_table_size)) ; Hash table size
3282 lis r5,hi16(EXT(pmap_mem_regions)) ; Memory regions
3283 lis r6,hi16(EXT(mapCtl)) ; Mappings
3284 ori r3,r3,lo16(EXT(hash_table_base))
3285 ori r4,r4,lo16(EXT(hash_table_size))
3286 ori r5,r5,lo16(EXT(pmap_mem_regions))
3287 ori r6,r6,lo16(EXT(mapCtl))
3288 lwz r3,0(r3)
3289 lwz r4,0(r4)
3290 lwz r5,4(r5) ; Get the pointer to the phys_ent table
3291 lwz r6,0(r6) ; Get the pointer to the current mapping block
3292 stw r3,8(r12) ; Save the hash table address
3293 stw r4,12(r12) ; Save the hash table size
3294 stw r5,16(r12) ; Save the physent pointer
3295 stw r6,20(r12) ; Save the mappings
3296
3297 addi r11,r12,0x1000 ; Point to area to move hash table and PCA
3298
3299 add r4,r4,r4 ; Double the size to cover both the hash table and the PCA
3300
3301 copyhash: lwz r7,0(r3) ; Copy both of them
3302 lwz r8,4(r3)
3303 lwz r9,8(r3)
3304 lwz r10,12(r3)
3305 subic. r4,r4,0x10
3306 addi r3,r3,0x10
3307 stw r7,0(r11)
3308 stw r8,4(r11)
3309 stw r9,8(r11)
3310 stw r10,12(r11)
3311 addi r11,r11,0x10
3312 bgt+ copyhash
3313
3314 rlwinm r4,r12,20,12,31 ; Get number of phys_ents
3315
3316 copyphys: lwz r7,0(r5) ; Copy physents
3317 lwz r8,4(r5)
3318 subic. r4,r4,1
3319 addi r5,r5,8
3320 stw r7,0(r11)
3321 stw r8,4(r11)
3322 addi r11,r11,8
3323 bgt+ copyphys
3324
3325 addi r11,r11,4095 ; Round up to next page
3326 rlwinm r11,r11,0,0,19
3327
3328 lwz r4,4(r6) ; Get the size of the mapping area
3329
3330 copymaps: lwz r7,0(r6) ; Copy the mappings
3331 lwz r8,4(r6)
3332 lwz r9,8(r6)
3333 lwz r10,12(r6)
3334 subic. r4,r4,0x10
3335 addi r6,r6,0x10
3336 stw r7,0(r11)
3337 stw r8,4(r11)
3338 stw r9,8(r11)
3339 stw r10,12(r11)
3340 addi r11,r11,0x10
3341 bgt+ copymaps
3342
3343 sub r11,r11,r12 ; Get the total length we saved
3344 stw r11,24(r12) ; Save the size
3345
3346 logmemexit: mtmsr r2 ; Back to normal
3347 li r3,0
3348 isync
3349 blr
3350
3351