/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <db_machine_commands.h>
#include <mach_debug.h>

#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/pmap_internals.h>
#include <mach/ppc/vm_param.h>
/*
 * Random notes and musings...
 *
 * Access to mappings via the PTEG hash must be done with the list locked.
 * Access via the physical entries is controlled by the physent lock.
 * Access to mappings is controlled by the PTEG lock once they are queued.
 * If they are not on the list, they don't really exist: only one processor
 * at a time can find them, so no access control is needed.
 *
 * The second half of the PTE is kept in the physical entry. It is done this
 * way because there may be multiple mappings that refer to the same physical
 * page (i.e., address aliases or synonyms). We must do it this way, because
 * maintenance of the reference and change bits becomes nightmarish if each mapping
 * has its own. One side effect of this, and not necessarily a bad one, is that
 * all mappings for a single page share a single WIMG, protection state, and RC bits.
 * The only "bad" thing is the reference bit. With a single copy, we cannot get
 * a completely accurate working set calculation, i.e., we can't tell which mapping was
 * used to reference the page; all we can tell is that the physical page was
 * referenced.
 *
 * The master copies of the reference and change bits are kept in the phys_entry.
 * Other than the reference and change bits, changes to the phys_entry are not
 * allowed if it has any mappings. The master reference and change bits must be
 * changed via atomic update.
 *
 * Invalidating a PTE merges the RC bits into the phys_entry.
 */
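/*
 * For reference, the atomic RC merge used throughout this file (mrgrc,
 * protmod, commmod, htrmrc) is just a lwarx/stwcx. OR loop. A rough C sketch
 * of the idea (illustrative only; "pepte1_word" is an invented name for the
 * low word of the master PTE image, not the real layout):
 *
 *	#include <stdint.h>
 *
 *	static void merge_rc(volatile uint32_t *pepte1_word, uint32_t rc)
 *	{
 *		uint32_t old;
 *		do {						// lwarx  - get the master copy
 *			old = *pepte1_word;
 *		} while (!__sync_bool_compare_and_swap(		// stwcx. - save it back,
 *			pepte1_word, old, old | rc));		// retry if it changed
 *	}
 */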
/*
 * Before checking the reference and/or change bits, ALL mappings to the physical
 * page are invalidated.
 *
 * PTEs are never explicitly validated, they are always faulted in. They are also
 * not visible outside of the hw_vm modules. Complete separation of church and state.
 *
 * Removal of a mapping invalidates its PTE.
 *
 * So, how do we deal with mappings to I/O space? We don't have a physent for it.
 * Within the mapping is a copy of the second half of the PTE. This is used
 * ONLY when there is no physical entry. It is swapped into the PTE whenever
 * it is built. There is no need to swap it back out, because RC is not
 * maintained for these mappings.
 *
 * So, I'm starting to get concerned about the number of lwarx/stwcx. loops in
 * this. Satisfying a mapped address with no stealing requires one lock. If we
 * steal an entry, there are two locks and an atomic update. Invalidation of an entry
 * takes one lock and, if there is a PTE, another lock and an atomic update. Other
 * operations are multiples (per mapping) of the above. Maybe we should look for
 * an alternative. So far, I haven't found one, but I haven't looked hard.
 */
/* hw_add_map(struct mapping *mp, space_t space, vm_offset_t va) - Adds a mapping
 *
 * Adds a mapping to the PTEG hash list.
 *
 * Interrupts must be disabled before calling.
 *
 * Using the space and the virtual address, we hash into the hash table
 * and get a lock on the PTEG hash chain. Then we chain the
 * mapping to the front of the list.
 */
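/*
 * In C, the hash-and-prepend is roughly the following (a sketch only: the
 * field and constant names are invented, and "subhash" stands for the
 * "splooch" value the real code computes from low VSID bits and vaddr
 * nybbles to pick one of several sub-chains per PCA):
 *
 *	uint32_t vsid = (space << 6) | (va >> 28);		// space plus segment no.
 *	uint32_t hash = (vsid ^ ((va >> 12) & 0xFFFF)) & htab_mask;	// primary hash
 *	pca_t *pca = &pca_base[hash];				// our PCA entry
 *
 *	lock(&pca->lock);
 *	mp->mmhashnext = pca->hash[subhash];			// chain in the old head
 *	pca->hash[subhash] = mp;				// we are the new head
 *	unlock(&pca->lock);
 */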
			.globl	EXT(hw_add_map)

LEXT(hw_add_map)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_add_map
#endif

			mfmsr	r0				/* Get the MSR */
			eqv	r6,r6,r6			/* Fill the bottom with foxes */
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r11,r4,6,6,25			/* Position the space for the VSID */
			mfspr	r10,sdr1			/* Get hash table base and size */
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwimi	r11,r5,30,2,5			/* Insert the segment no. to make a VSID */
			mfsprg	r12,2				; Get feature flags
			rlwimi	r6,r10,16,0,15			/* Make table size -1 out of mask */
			rlwinm	r7,r5,26,10,25			/* Isolate the page index */
			or	r8,r10,r6			/* Point to the last byte in table */
			rlwinm	r9,r5,4,0,3			; Move nybble 1 up to 0
			xor	r7,r7,r11			/* Get primary hash */
			mtcrf	0x04,r12			; Set the features
			andi.	r12,r0,0x7FCF			/* Disable translation and interruptions */
			rlwinm	r11,r11,1,1,24			/* Position VSID for pte ID */
			addi	r8,r8,1				/* Point to the PTEG Control Area */
			xor	r9,r9,r5			; Splooch vaddr nybble 0 and 1 together
			and	r7,r7,r6			/* Wrap the hash */
			rlwimi	r11,r5,10,26,31			/* Move API into pte ID */
			rlwinm	r9,r9,6,27,29			; Get splooched bits in place
			add	r8,r8,r7			/* Point to our PCA entry */
			rlwinm	r10,r4,2,27,29			; Get low 3 bits of the VSID for look-aside hash

			bt	pfNoMSRirb,hamNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

hamNoMSR:	mr	r4,r0				; Save R0
			li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it

			la	r4,PCAhash(r8)			/* Point to the mapping hash area */
			xor	r9,r9,r10			; Finish splooching nybble 0, 1, and the low bits of the VSID
			isync					/* Get rid of anything prefetched before we ref storage */

/*
 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
 * and word 0 of the PTE (the virtual part).
 *
 * Now, we just lock the PCA.
 */

			li	r12,1				/* Get the locked value */
			dcbt	0,r4				/* We'll need the hash area in a sec, so get it */
			add	r4,r4,r9			/* Point to the right mapping hash slot */

ptegLckx:	lwarx	r10,0,r8			/* Get the PTEG lock */
			mr.	r10,r10				/* Is it locked? */
			bne-	ptegLckwx			/* Yeah... */
			stwcx.	r12,0,r8			/* Try to take it */
			bne-	ptegLckx			/* Someone else was trying, try again... */
			b	ptegSXgx			/* All done... */

ptegLckwx:	mr.	r10,r10				/* Check if it's already held */
			beq+	ptegLckx			/* It's clear... */
			lwz	r10,0(r8)			/* Get lock word again... */
			b	ptegLckwx			/* Wait... */
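/*
 * This lock sequence is the classic test-and-test-and-set: once we see the
 * lock held, we spin on a plain load (no reservation traffic) and only go
 * back to the lwarx/stwcx. pair when the word clears. A C sketch of the
 * shape (invented names; pca->lock is a volatile uint32_t, not the real
 * PCA layout):
 *
 *	while (__sync_lock_test_and_set(&pca->lock, 1) != 0) {	// lwarx/stwcx.
 *		while (pca->lock != 0)		// plain load while it's held...
 *			continue;		// ...so we don't hammer the bus
 *	}
 *	__asm__ volatile("isync");		// no loads ahead of the lock
 */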
ptegSXgx:	isync					/* Make sure we haven't used anything yet */

			lwz	r7,0(r4)			/* Pick up the anchor of hash list */
			stw	r3,0(r4)			/* Save the new head */
			stw	r7,mmhashnext(r3)		/* Chain in the old head */

			stw	r4,mmPTEhash(r3)		/* Point to the head of the hash list */

			sync					/* Make sure the chain is updated */
			stw	r10,0(r8)			/* Unlock the hash list */
			mtmsr	r0				/* Restore translation and interruptions */
			isync					/* Toss anything done with DAT off */
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_add_map
#endif
			blr					/* Leave... */
/* mp=hw_lock_phys_vir(space, va) - Finds and locks a physical entry by vaddr.
 *
 * Returns the mapping with the associated physent locked if found, or a
 * zero and no lock if not. If we timed out trying to get the lock on
 * the physical entry, we return a 1. A physical entry can never be on an
 * odd boundary, so we can distinguish between a mapping and a timeout code.
 *
 * Interrupts must be disabled before calling.
 *
 * Using the space and the virtual address, we hash into the hash table
 * and get a lock on the PTEG hash chain. Then we search the chain for the
 * mapping for our virtual address. From there, we extract the pointer to
 * the physical entry.
 *
 * Next comes a bit of monkey business. We need to get a lock on the physical
 * entry. But, according to our rules, we can't get it after we've gotten the
 * PTEG hash lock; we could deadlock if we do. So, we need to release the
 * hash lock. The problem is, though, that as soon as we release it, some
 * other yahoo may remove our mapping between the time that we release the
 * hash lock and obtain the phys entry lock. So, we can't count on the
 * mapping once we release the lock. Instead, after we lock the phys entry,
 * we search the mapping list (phys_link) for our translation. If we don't find it,
 * we unlock the phys entry, bail out, and return a 0 for the mapping address. If we
 * did find it, we keep the lock and return the address of the mapping block.
 *
 * What happens when a mapping is found, but there is no physical entry?
 * This is what happens when an I/O area is mapped. If one of these mappings
 * is found, the mapping is returned, as is usual for this call, but we don't
 * try to lock anything. There could possibly be some problems here if another
 * processor releases the mapping while we are still using it. Hope this
 * ain't gonna happen.
 *
 * Taaa-dahhh! Easy as pie, huh?
 *
 * So, we have a few hacks for running with translation off in here.
 * First, when we call the lock routine, we have carnal knowledge of the registers it uses.
 * That way, we don't need a stack frame, which we can't have 'cause the stack is in
 * virtual storage. But wait, as if that's not enough... We need one more register. So,
 * we cram the LR into the CTR and return from there.
 */
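/*
 * The lock-ordering dance, sketched in C (invented helper names; the return
 * conventions are the ones described above):
 *
 *	lock_pca(pca);
 *	mp = search_chain(pca, pte_id);		// find our mapping
 *	pe = mp ? mp->mmphysent : NULL;
 *	unlock_pca(pca);			// must drop before the physent lock
 *	if (mp == NULL || pe == NULL)
 *		return mp;			// not found, or an I/O space mapping
 *	if (!lock_phys_entry(pe, timeout))
 *		return (mapping_t *)1;		// timed out on the physent lock
 *	for (mapping_t *m = pe->phys_link; m != NULL; m = m->mmnext)
 *		if (m == mp) return mp;		// still ours; keep the lock
 *	unlock_phys_entry(pe);
 *	return NULL;				// it got yanked while we looked away
 */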
			.globl	EXT(hw_lock_phys_vir)

LEXT(hw_lock_phys_vir)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_lock_phys_vir
#endif

			mfmsr	r12				/* Get the MSR */
			eqv	r6,r6,r6			/* Fill the bottom with foxes */
			mfsprg	r9,2				; Get feature flags
			rlwinm	r11,r3,6,6,25			/* Position the space for the VSID */
			rlwinm	r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			mfspr	r5,sdr1				/* Get hash table base and size */
			rlwimi	r11,r4,30,2,5			/* Insert the segment no. to make a VSID */
			mtcrf	0x04,r9				; Set the features
			rlwinm	r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwimi	r6,r5,16,0,15			/* Make table size -1 out of mask */
			andi.	r0,r12,0x7FCF			/* Disable translation and interruptions */
			rlwinm	r9,r4,4,0,3			; Move nybble 1 up to 0
			rlwinm	r7,r4,26,10,25			/* Isolate the page index */
			or	r8,r5,r6			/* Point to the last byte in table */
			xor	r7,r7,r11			/* Get primary hash */
			rlwinm	r11,r11,1,1,24			/* Position VSID for pte ID */
			addi	r8,r8,1				/* Point to the PTEG Control Area */
			xor	r9,r9,r4			; Splooch vaddr nybble 0 and 1 together
			and	r7,r7,r6			/* Wrap the hash */
			rlwimi	r11,r4,10,26,31			/* Move API into pte ID */
			rlwinm	r9,r9,6,27,29			; Get splooched bits in place
			add	r8,r8,r7			/* Point to our PCA entry */
			rlwinm	r10,r3,2,27,29			; Get low 3 bits of the VSID for look-aside hash

			bt	pfNoMSRirb,hlpNoMSR		; No MSR...

			mtmsr	r0				; Translation and all off
			isync					; Toss prefetch

hlpNoMSR:	mr	r3,r0				; Get the new MSR
			li	r0,loadMSR			; Get the MSR setter SC
			sc					; Set it

			la	r3,PCAhash(r8)			/* Point to the mapping hash area */
			xor	r9,r9,r10			; Finish splooching nybble 0, 1, and the low bits of the VSID
			isync					/* Make sure translation is off before we ref storage */

/*
 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
 * and word 0 of the PTE (the virtual part).
 *
 * Now, we just lock the PCA and find our mapping, if it exists.
 */

			dcbt	0,r3				/* We'll need the hash area in a sec, so get it */
			add	r3,r3,r9			/* Point to the right mapping hash slot */

ptegLcka:	lwarx	r10,0,r8			/* Get the PTEG lock */
			li	r5,1				/* Get the locked value */
			mr.	r10,r10				/* Is it locked? */
			bne-	ptegLckwa			/* Yeah... */
			stwcx.	r5,0,r8				/* Try to take it */
			bne-	ptegLcka			/* Someone else was trying, try again... */
			b	ptegSXga			/* All done... */

ptegLckwa:	mr.	r10,r10				/* Check if it's already held */
			beq+	ptegLcka			/* It's clear... */
			lwz	r10,0(r8)			/* Get lock word again... */
			b	ptegLckwa			/* Wait... */

ptegSXga:	isync					/* Make sure we haven't used anything yet */

			mflr	r0				/* Get the LR */
			lwz	r9,0(r3)			/* Pick up the first mapping block */
			mtctr	r0				/* Stuff it into the CTR */

findmapa:	mr.	r3,r9				/* Did we hit the end? */
			bne+	chkmapa				/* Nope... */

			stw	r3,0(r8)			/* Unlock the PTEG lock
							   Note: we never saved anything while we
							   had the lock, so we don't need a sync
							   before we unlock it */

vbail:		mtmsr	r12				/* Restore translation and interruptions */
			isync					/* Make sure translation is cool */
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_lock_phys_vir
#endif
			bctr					/* Return in abject failure... */
chkmapa:	lwz	r10,mmPTEv(r3)			/* Pick up our virtual ID */
			lwz	r9,mmhashnext(r3)		/* Pick up next mapping block */
			cmplw	r10,r11				/* Have we found ourself? */
			bne-	findmapa			/* Nope, still wandering... */

			lwz	r9,mmphysent(r3)		/* Get our physical entry pointer */
			li	r5,0				/* Clear this out */
			mr.	r9,r9				/* Is there, like, a physical entry? */
			stw	r5,0(r8)			/* Unlock the PTEG lock
							   Note: we never saved anything while we
							   had the lock, so we don't need a sync
							   before we unlock it */

			beq-	vbail				/* If there is no physical entry, it's time
							   to leave... */

/* Here we want to call hw_lock_bit. We don't want to use the stack, 'cause it's
 * in virtual storage, and we're in real. So, we've carefully looked at the code
 * in hw_lock_bit (and unlock) and cleverly don't use any of the registers that it uses.
 * Be very, very aware of how you change this code. By the way, it uses:
 * R0, R6, R7, R8, and R9. R3, R4, and R5 contain parameters.
 * Unfortunately, we still need to stash R9. So... Since we know we will not be interrupted
 * ('cause we turned off interruptions and translation is off) we will use SPRG3...
 */

			lwz	r10,mmPTEhash(r3)		/* Save the head of the hash-alike chain. We need it to find ourselves later */
			lis	r5,HIGH_ADDR(EXT(LockTimeOut))	/* Get address of timeout value */
			la	r3,pephyslink(r9)		/* Point to the lock word */
			ori	r5,r5,LOW_ADDR(EXT(LockTimeOut)) /* Get second half of address */
			li	r4,PHYS_LOCK			/* Get the lock bit value */
			lwz	r5,0(r5)			/* Pick up the timeout value */
			mtsprg	3,r9				/* Save R9 in SPRG3 */

			bl	EXT(hw_lock_bit)		/* Go do the lock */
			mfsprg	r9,3				/* Restore pointer to the phys_entry */
			mr.	r3,r3				/* Did we time out? */
			lwz	r4,pephyslink(r9)		/* Pick up first mapping block */
			beq-	penterr				/* Bad deal, we timed out... */

			rlwinm	r4,r4,0,0,26			; Clear out the flags from first link

findmapb:	mr.	r3,r4				/* Did we hit the end? */
			bne+	chkmapb				/* Nope... */

			la	r3,pephyslink(r9)		/* Point to where the lock is */
			li	r4,PHYS_LOCK			/* Get the lock bit value */
			bl	EXT(hw_unlock_bit)		/* Go unlock the physentry */

			li	r3,0				/* Say we failed */
			b	vbail				/* Return in abject failure... */

penterr:	li	r3,1				/* Set timeout */
			b	vbail				/* Return in abject failure... */

chkmapb:	lwz	r6,mmPTEv(r3)			/* Pick up our virtual ID */
			lwz	r4,mmnext(r3)			/* Pick up next mapping block */
			cmplw	r6,r11				/* Have we found ourself? */
			lwz	r5,mmPTEhash(r3)		/* Get the start of our hash chain */
			bne-	findmapb			/* Nope, still wandering... */
			cmplw	r5,r10				/* On the same hash chain? */
			bne-	findmapb			/* Nope, keep looking... */

			b	vbail				/* Return in glorious triumph... */
/*
 * hw_rem_map(mapping) - remove a mapping from the system.
 *
 * Upon entry, R3 contains a pointer to a mapping block and the associated
 * physical entry is locked if there is one.
 *
 * If the mapping entry indicates that there is a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, we remove the mapping from the phys_entry and the PTEG hash list.
 *
 * Unlock any locks that are left, and exit.
 *
 * Note that this must be done with both interruptions off and VM off.
 *
 * Note that this code depends upon the VSID being of the format 00SXXXXX
 * where S is the segment number.
 */
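/*
 * The two unlink loops below (srchmaps and srchpmap) are ordinary singly
 * linked list removal, with a firmware Choke if we are not on the chain we
 * think we are on. In C, the shape is (sketch; names invented):
 *
 *	for (prev = anchor; prev->next != mp; prev = prev->next)
 *		if (prev->next == NULL)
 *			choke();		// not on our own chain: die
 *	prev->next = mp->next;			// remove us from the queue
 */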
			.globl	EXT(hw_rem_map)

LEXT(hw_rem_map)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_rem_map
#endif

			mfsprg	r9,2				; Get feature flags
			mfmsr	r0				/* Save the MSR */
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

			bt	pfNoMSRirb,lmvNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

lmvNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it
			lwz	r6,mmPTEhash(r3)		/* Get pointer to hash list anchor */
			lwz	r5,mmPTEv(r3)			/* Get the VSID */
			dcbt	0,r6				/* We'll need that chain in a bit */

			rlwinm	r7,r6,0,0,25			/* Round hash list down to PCA boundary */
			li	r12,1				/* Get the locked value */
			subi	r6,r6,mmhashnext		/* Make the anchor look like an entry */

ptegLck1:	lwarx	r10,0,r7			/* Get the PTEG lock */
			mr.	r10,r10				/* Is it locked? */
			bne-	ptegLckw1			/* Yeah... */
			stwcx.	r12,0,r7			/* Try to take it */
			bne-	ptegLck1			/* Someone else was trying, try again... */
			b	ptegSXg1			/* All done... */

ptegLckw1:	mr.	r10,r10				/* Check if it's already held */
			beq+	ptegLck1			/* It's clear... */
			lwz	r10,0(r7)			/* Get lock word again... */
			b	ptegLckw1			/* Wait... */

ptegSXg1:	isync					/* Make sure we haven't used anything yet */

			lwz	r12,mmhashnext(r3)		/* Prime with our forward pointer */
			lwz	r4,mmPTEent(r3)			/* Get the pointer to the PTE now that the lock's set */

srchmaps:	mr.	r10,r6				/* Save the previous entry */
			bne+	mapok				/* No error... */

			lis	r0,HIGH_ADDR(Choke)		/* We have a kernel choke!!! */
			ori	r0,r0,LOW_ADDR(Choke)
			sc					/* Firmware Heimlich maneuver */

mapok:		lwz	r6,mmhashnext(r6)		/* Look at the next one */
			cmplwi	cr5,r4,0			/* Is there a PTE? */
			cmplw	r6,r3				/* Have we found ourselves? */
			bne+	srchmaps			/* Nope, get your head together... */

			stw	r12,mmhashnext(r10)		/* Remove us from the queue */
			rlwinm	r9,r5,1,0,3			/* Move in the segment */
			rlwinm	r8,r4,6,4,19			/* Line PTEG disp up to a page */
			rlwinm	r11,r5,5,4,19			/* Line up the VSID */
			lwz	r10,mmphysent(r3)		/* Point to the physical entry */
			beq+	cr5,nopte			/* There's no PTE to invalidate... */

			xor	r8,r8,r11			/* Back hash to virt index */
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
			rlwimi	r9,r5,22,4,9			/* Move in the API */
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
			mfspr	r11,pvr				/* Find out what kind of machine we are */
			rlwimi	r9,r8,0,10,19			/* Create the virtual address */
			rlwinm	r11,r11,16,16,31		/* Isolate CPU type */

			stw	r5,0(r4)			/* Make the PTE invalid */

			cmplwi	cr1,r11,3			/* Is this a 603? */
			sync					/* Make sure the invalid is stored */

tlbhang1:	lwarx	r5,0,r12			/* Get the TLBIE lock */
			rlwinm	r11,r4,29,29,31			/* Get the bit position of entry */
			mr.	r5,r5				/* Is it locked? */
			lis	r6,0x8000			/* Start up a bit mask */
			li	r5,1				/* Get our lock word */
			bne-	tlbhang1			/* It's locked, go wait... */
			stwcx.	r5,0,r12			/* Try to get it */
			bne-	tlbhang1			/* We was beat... */

			srw	r6,r6,r11			/* Make a "free slot" mask */
			lwz	r5,PCAallo(r7)			/* Get the allocation control bits */
			rlwinm	r11,r6,24,8,15			/* Make the autogen bit to turn off */
			or	r5,r5,r6			/* Turn on the free bit */
			rlwimi	r11,r11,24,16,23		/* Get lock bit mask to turn it off */

			andc	r5,r5,r11			/* Turn off the lock and autogen bits in allocation flags */
			li	r11,0				/* Lock clear value */

			tlbie	r9				/* Invalidate it everywhere */

			beq-	cr1,its603a			/* It's a 603, skip the tlbsync... */

			eieio					/* Make sure that the tlbie happens first */
			tlbsync					/* Wait for everyone to catch up */

its603a:	sync					/* Make sure of it all */
			stw	r11,0(r12)			/* Clear the tlbie lock */
			eieio					/* Make sure those RC bits are loaded */
			stw	r5,PCAallo(r7)			/* Show that the slot is free */
			stw	r11,mmPTEent(r3)		/* Clear the pointer to the PTE */
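/*
 * That is the architected invalidate recipe, serialized by tlb_system_lock
 * because at most one tlbie may be in flight in the system at a time. In
 * pseudo-C (a sketch; the 603 path skips tlbsync, which that class of
 * processor does not need):
 *
 *	pte->word0 &= ~PTE_VALID;  sync();	// invalidate, make it visible
 *	lock(&tlb_system_lock);
 *	tlbie(ea);				// toss the TLB entry everywhere
 *	if (!is_603) {
 *		eieio();			// the tlbie goes first...
 *		tlbsync();			// ...then everyone catches up
 *	}
 *	sync();
 *	unlock(&tlb_system_lock);
 */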
nopte:		mr.	r10,r10				/* See if there is a physical entry */
			la	r9,pephyslink(r10)		/* Point to the physical mapping chain */
			beq-	nophys				/* No physical entry, we're done... */
			beq-	cr5,nadamrg			/* No PTE to merge... */

			lwz	r6,4(r4)			/* Get the latest reference and change bits */
			la	r12,pepte1(r10)			/* Point right at the master copy */
			rlwinm	r6,r6,0,23,24			/* Extract just the RC bits */

mrgrc:		lwarx	r8,0,r12			/* Get the master copy */
			or	r8,r8,r6			/* Merge in latest RC */
			stwcx.	r8,0,r12			/* Save it back */
			bne-	mrgrc				/* If it changed, try again... */

nadamrg:	li	r11,0				/* Clear this out */
			lwz	r12,mmnext(r3)			/* Prime with our next */

			sync					; Make sure all is saved

			stw	r11,0(r7)			/* Unlock the hash chain now so we don't
							   lock out another processor during
							   our next little search */

srchpmap:	mr.	r10,r9				/* Save the previous entry */
			bne+	mapok1				/* No error... */

			lis	r0,HIGH_ADDR(Choke)		/* We have a kernel choke!!! */
			ori	r0,r0,LOW_ADDR(Choke)
			sc					/* Firmware Heimlich maneuver */

mapok1:		lwz	r9,mmnext(r9)			/* Look at the next one */
			rlwinm	r8,r9,0,27,31			; Save the flags (including the lock)
			rlwinm	r9,r9,0,0,26			; Clear out the flags from first link
			cmplw	r9,r3				/* Have we found ourselves? */
			bne+	srchpmap			/* Nope, get your head together... */

			rlwimi	r12,r8,0,27,31			; Insert the lock and flags
			stw	r12,mmnext(r10)			/* Remove us from the queue */

			mtmsr	r0				/* Interrupts and translation back on */
			isync
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_rem_map
#endif
			blr					/* Return... */

nophys:		li	r4,0				/* Make sure this is 0 */
			sync					/* Make sure that chain is updated */
			stw	r4,0(r7)			/* Unlock the hash chain */
			mtmsr	r0				/* Interrupts and translation back on */
			isync
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_rem_map
#endif
			blr					/* Return... */
/*
 * hw_prot(physent, prot) - Change the protection of a physical page
 *
 * Upon entry, R3 contains a pointer to a physical entry which is locked.
 * R4 contains the PPC protection bits.
 *
 * The first thing we do is to slam the new protection into the phys entry.
 * Then we scan the mappings and process each one.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, slam the protection bits into the entry and unlock the hash list.
 *
 * Note that this must be done with both interruptions off and VM off.
 */
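/*
 * Shape of hw_prot in C (sketch only; the helper names are invented, and
 * merge_rc is the atomic OR loop sketched near the top of the file):
 *
 *	pe->pepte1 = (pe->pepte1 & ~3) | prot;		// atomically, in the real code
 *	for (mapping_t *mp = pe->phys_link; mp != NULL; mp = mp->mmnext) {
 *		lock_pca(pca_of(mp));
 *		if (mp->mmPTEent != NULL) {		// live PTE?
 *			invalidate_pte(mp);		// tlbie dance, free the slot
 *			merge_rc(&pe->pepte1, hw_rc(mp));	// keep RC up to date
 *		}
 *		mp->mmPTEr = (mp->mmPTEr & ~3) | prot;	// fix the mapping copy too
 *		unlock_pca(pca_of(mp));
 *	}
 */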
			.globl	EXT(hw_prot)

LEXT(hw_prot)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_prot
#endif
			mfsprg	r9,2				; Get feature flags
			mfmsr	r0				/* Save the MSR */
			li	r5,pepte1			/* Get displacement to the second word of master pte */
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

			bt	pfNoMSRirb,hpNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

hpNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it

			lwz	r10,pephyslink(r3)		/* Get the first mapping block */
			rlwinm	r10,r10,0,0,26			; Clear out the flags from first link

/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

protcng:	lwarx	r8,r5,r3			/* Get the master copy */
			rlwimi	r8,r4,0,30,31			/* Move in the protection bits */
			stwcx.	r8,r5,r3			/* Save it back */
			bne-	protcng				/* If it changed, try again... */
protnext:	mr.	r10,r10				/* Are there any more mappings? */
			beq-	protdone			/* Naw... */

			lwz	r7,mmPTEhash(r10)		/* Get pointer to hash list anchor */
			lwz	r5,mmPTEv(r10)			/* Get the virtual address */
			rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */

			li	r12,1				/* Get the locked value */

protLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
			mr.	r11,r11				/* Is it locked? */
			bne-	protLckw1			/* Yeah... */
			stwcx.	r12,0,r7			/* Try to take it */
			bne-	protLck1			/* Someone else was trying, try again... */
			b	protSXg1			/* All done... */

protLckw1:	mr.	r11,r11				/* Check if it's already held */
			beq+	protLck1			/* It's clear... */
			lwz	r11,0(r7)			/* Get lock word again... */
			b	protLckw1			/* Wait... */

protSXg1:	isync					/* Make sure we haven't used anything yet */

			lwz	r6,mmPTEent(r10)		/* Get the pointer to the PTE now that the lock's set */

			rlwinm	r9,r5,1,0,3			/* Move in the segment */
			lwz	r2,mmPTEr(r10)			; Get the mapping copy of the PTE
			mr.	r6,r6				/* See if there is a PTE here */
			rlwinm	r8,r5,31,2,25			/* Line it up */
			rlwimi	r2,r4,0,30,31			; Move protection bits into the mapping copy

			beq+	protul				/* There's no PTE to invalidate... */

			xor	r8,r8,r6			/* Back hash to virt index */
			rlwimi	r9,r5,22,4,9			/* Move in the API */
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
			rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
			mfspr	r11,pvr				/* Find out what kind of machine we are */
			rlwimi	r9,r8,6,10,19			/* Create the virtual address */
			rlwinm	r11,r11,16,16,31		/* Isolate CPU type */

			stw	r5,0(r6)			/* Make the PTE invalid */
			cmplwi	cr1,r11,3			/* Is this a 603? */
			sync					/* Make sure the invalid is stored */
tlbhangp:	lwarx	r11,0,r12			/* Get the TLBIE lock */
			rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
			mr.	r11,r11				/* Is it locked? */
			lis	r5,0x8000			/* Start up a bit mask */
			li	r11,1				/* Get our lock word */
			bne-	tlbhangp			/* It's locked, go wait... */
			stwcx.	r11,0,r12			/* Try to get it */
			bne-	tlbhangp			/* We was beat... */

			li	r11,0				/* Lock clear value */

			tlbie	r9				/* Invalidate it everywhere */

			beq-	cr1,its603p			/* It's a 603, skip the tlbsync... */

			eieio					/* Make sure that the tlbie happens first */
			tlbsync					/* Wait for everyone to catch up */

its603p:	stw	r11,0(r12)			/* Clear the lock */
			srw	r5,r5,r8			/* Make a "free slot" mask */
			sync					/* Make sure of it all */

			lwz	r6,4(r6)			/* Get the latest reference and change bits */
			stw	r11,mmPTEent(r10)		/* Clear the pointer to the PTE */
			rlwinm	r6,r6,0,23,24			/* Extract the RC bits */
			lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
			rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
			rlwimi	r2,r6,0,23,24			; Put the latest RC bits in mapping copy
			or	r9,r9,r5			/* Set the slot free */
			rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
			andc	r9,r9,r8			/* Clear the auto and lock bits */
			li	r5,pepte1			/* Get displacement to the second word of master pte */
			stw	r9,PCAallo(r7)			/* Store the allocation controls */

protmod:	lwarx	r11,r5,r3			/* Get the master copy */
			or	r11,r11,r6			/* Merge in latest RC */
			stwcx.	r11,r5,r3			/* Save it back */
			bne-	protmod				/* If it changed, try again... */

protul:		li	r4,0				/* Get a 0 */
			stw	r2,mmPTEr(r10)			; Save the updated mapping PTE
			lwz	r10,mmnext(r10)			/* Get the next */

			sync					; Make sure stores are complete

			stw	r4,0(r7)			/* Unlock the hash chain */
			b	protnext			/* Go get the next one */

protdone:	mtmsr	r0				/* Interrupts and translation back on */
			isync
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_prot
#endif
			blr					/* Return... */
/*
 * hw_prot_virt(mapping, prot) - Change the protection of a single page
 *
 * Upon entry, R3 contains a pointer (real) to a mapping.
 * R4 contains the PPC protection bits.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, slam the protection bits into the entry, merge the RC bits,
 * and unlock the hash list.
 *
 * Note that this must be done with both interruptions off and VM off.
 */

			.globl	EXT(hw_prot_virt)

LEXT(hw_prot_virt)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_prot_virt
#endif
			mfsprg	r9,2				; Get feature flags
			mfmsr	r0				/* Save the MSR */
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

			bt	pfNoMSRirb,hpvNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

hpvNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it

/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

			lwz	r7,mmPTEhash(r3)		/* Get pointer to hash list anchor */
			lwz	r5,mmPTEv(r3)			/* Get the virtual address */
			rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */

			li	r12,1				/* Get the locked value */
protvLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
			mr.	r11,r11				/* Is it locked? */
			bne-	protvLckw1			/* Yeah... */
			stwcx.	r12,0,r7			/* Try to take it */
			bne-	protvLck1			/* Someone else was trying, try again... */
			b	protvSXg1			/* All done... */

protvLckw1:	mr.	r11,r11				/* Check if it's already held */
			beq+	protvLck1			/* It's clear... */
			lwz	r11,0(r7)			/* Get lock word again... */
			b	protvLckw1			/* Wait... */

protvSXg1:	isync					/* Make sure we haven't used anything yet */

			lwz	r6,mmPTEent(r3)			/* Get the pointer to the PTE now that the lock's set */
			lwz	r2,mmPTEr(r3)			; Get the mapping copy of the real part

			rlwinm	r9,r5,1,0,3			/* Move in the segment */
			cmplwi	cr7,r6,0			; Any PTE to invalidate?
			rlwimi	r2,r4,0,30,31			; Move in the new protection bits
			rlwinm	r8,r5,31,2,25			/* Line it up */
			beq+	cr7,pvnophys			/* There's no PTE to invalidate... */

			xor	r8,r8,r6			/* Back hash to virt index */
			rlwimi	r9,r5,22,4,9			/* Move in the API */
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
			rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
			mfspr	r11,pvr				/* Find out what kind of machine we are */
			rlwimi	r9,r8,6,10,19			/* Create the virtual address */
			rlwinm	r11,r11,16,16,31		/* Isolate CPU type */

			stw	r5,0(r6)			/* Make the PTE invalid */
			cmplwi	cr1,r11,3			/* Is this a 603? */
			sync					/* Make sure the invalid is stored */

tlbhangpv:	lwarx	r11,0,r12			/* Get the TLBIE lock */
			rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
			mr.	r11,r11				/* Is it locked? */
			lis	r5,0x8000			/* Start up a bit mask */
			li	r11,1				/* Get our lock word */
			bne-	tlbhangpv			/* It's locked, go wait... */
			stwcx.	r11,0,r12			/* Try to get it */
			bne-	tlbhangpv			/* We was beat... */

			li	r11,0				/* Lock clear value */

			tlbie	r9				/* Invalidate it everywhere */

			beq-	cr1,its603pv			/* It's a 603, skip the tlbsync... */

			eieio					/* Make sure that the tlbie happens first */
			tlbsync					/* Wait for everyone to catch up */

its603pv:	stw	r11,0(r12)			/* Clear the lock */
			srw	r5,r5,r8			/* Make a "free slot" mask */
			sync					/* Make sure of it all */

			lwz	r6,4(r6)			/* Get the latest reference and change bits */
			stw	r11,mmPTEent(r3)		/* Clear the pointer to the PTE */
			rlwinm	r6,r6,0,23,24			/* Extract the RC bits */
			lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
			rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
			lwz	r10,mmphysent(r3)		; Get any physical entry
			or	r9,r9,r5			/* Set the slot free */
			rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
			andc	r9,r9,r8			/* Clear the auto and lock bits */
			mr.	r10,r10				; Is there a physical entry?
			li	r5,pepte1			/* Get displacement to the second word of master pte */
			stw	r9,PCAallo(r7)			/* Store the allocation controls */
			rlwimi	r2,r6,0,23,24			; Stick in RC bits
			beq-	pvnophys			; No physical entry...

protvmod:	lwarx	r11,r5,r10			/* Get the master copy */
			or	r11,r11,r6			/* Merge in latest RC */
			stwcx.	r11,r5,r10			/* Save it back */
			bne-	protvmod			/* If it changed, try again... */
pvnophys:	li	r4,0				/* Get a 0 */
			stw	r2,mmPTEr(r3)			; Set the real part of the PTE

			sync					; Make sure everything is stored

			stw	r4,0(r7)			/* Unlock the hash chain */
			mtmsr	r0				; Restore interrupts and translation
			isync

#if PERFTIMES && DEBUG
#endif
			blr					; Return...
/*
 * hw_attr_virt(mapping, attr) - Change the attributes of a single page
 *
 * Upon entry, R3 contains a pointer (real) to a mapping.
 * R4 contains the WIMG bits.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, slam the WIMG bits into the entry, merge the RC bits,
 * and unlock the hash list.
 *
 * Note that this must be done with both interruptions off and VM off.
 */

			.globl	EXT(hw_attr_virt)

LEXT(hw_attr_virt)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_attr_virt
#endif
			mfsprg	r9,2				; Get feature flags
			mfmsr	r0				/* Save the MSR */
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			mtcrf	0x04,r9				; Set the features
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

			bt	pfNoMSRirb,havNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

havNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it

/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

			lwz	r7,mmPTEhash(r3)		/* Get pointer to hash list anchor */
			lwz	r5,mmPTEv(r3)			/* Get the virtual address */
			rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */

			li	r12,1				/* Get the locked value */
attrvLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
			mr.	r11,r11				/* Is it locked? */
			bne-	attrvLckw1			/* Yeah... */
			stwcx.	r12,0,r7			/* Try to take it */
			bne-	attrvLck1			/* Someone else was trying, try again... */
			b	attrvSXg1			/* All done... */

attrvLckw1:	mr.	r11,r11				/* Check if it's already held */
			beq+	attrvLck1			/* It's clear... */
			lwz	r11,0(r7)			/* Get lock word again... */
			b	attrvLckw1			/* Wait... */

attrvSXg1:	isync					/* Make sure we haven't used anything yet */

			lwz	r6,mmPTEent(r3)			/* Get the pointer to the PTE now that the lock's set */
			lwz	r2,mmPTEr(r3)			; Get the mapping copy of the real part

			rlwinm	r9,r5,1,0,3			/* Move in the segment */
			mr.	r6,r6				/* See if there is a PTE here */
			rlwimi	r2,r4,0,25,28			; Move in the new attribute bits
			rlwinm	r8,r5,31,2,25			/* Line it up and check if empty */
			beq+	avnophys			/* There's no PTE to invalidate... */

			xor	r8,r8,r6			/* Back hash to virt index */
			rlwimi	r9,r5,22,4,9			/* Move in the API */
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
			rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
			mfspr	r11,pvr				/* Find out what kind of machine we are */
			rlwimi	r9,r8,6,10,19			/* Create the virtual address */
			rlwinm	r11,r11,16,16,31		/* Isolate CPU type */
			stw	r5,0(r6)			/* Make the PTE invalid */
			cmplwi	cr1,r11,3			/* Is this a 603? */
			sync					/* Make sure the invalid is stored */

tlbhangav:	lwarx	r11,0,r12			/* Get the TLBIE lock */
			rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
			mr.	r11,r11				/* Is it locked? */
			lis	r5,0x8000			/* Start up a bit mask */
			li	r11,1				/* Get our lock word */
			bne-	tlbhangav			/* It's locked, go wait... */
			stwcx.	r11,0,r12			/* Try to get it */
			bne-	tlbhangav			/* We was beat... */

			li	r11,0				/* Lock clear value */

			tlbie	r9				/* Invalidate it everywhere */

			beq-	cr1,its603av			/* It's a 603, skip the tlbsync... */

			eieio					/* Make sure that the tlbie happens first */
			tlbsync					/* Wait for everyone to catch up */

its603av:	stw	r11,0(r12)			/* Clear the lock */
			srw	r5,r5,r8			/* Make a "free slot" mask */
			sync					/* Make sure of it all */

			lwz	r6,4(r6)			/* Get the latest reference and change bits */
			stw	r11,mmPTEent(r3)		/* Clear the pointer to the PTE */
			rlwinm	r6,r6,0,23,24			/* Extract the RC bits */
			lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
			rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
			lwz	r10,mmphysent(r3)		; Get any physical entry
			or	r9,r9,r5			/* Set the slot free */
			rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
			andc	r9,r9,r8			/* Clear the auto and lock bits */
			mr.	r10,r10				; Is there a physical entry?
			li	r5,pepte1			/* Get displacement to the second word of master pte */
			stw	r9,PCAallo(r7)			/* Store the allocation controls */
			rlwimi	r2,r6,0,23,24			; Stick in RC bits
			beq-	avnophys			; No physical entry...

attrvmod:	lwarx	r11,r5,r10			/* Get the master copy */
			or	r11,r11,r6			/* Merge in latest RC */
			stwcx.	r11,r5,r10			/* Save it back */
			bne-	attrvmod			/* If it changed, try again... */

avnophys:	li	r4,0				/* Get a 0 */
			stw	r2,mmPTEr(r3)			; Set the real part of the PTE

			sync					; Make sure that everything is updated

			stw	r4,0(r7)			/* Unlock the hash chain */
			rlwinm	r2,r2,0,0,19			; Clear back to page boundary

attrflsh:	cmplwi	r4,(4096-32)			; Are we about to do the last line on page?
			dcbst	r2,r4				; Flush cache because we changed attributes
			addi	r4,r4,32			; Bump up cache line offset
			blt+	attrflsh			; Do the whole page...
			sync					; Make sure the flushes are done

			li	r4,0				; Start over at the top of the page

attrimvl:	cmplwi	r4,(4096-32)			; Are we about to do the last line on page?
			dcbi	r2,r4				; Invalidate dcache because we changed attributes
			icbi	r2,r4				; Invalidate icache because we changed attributes
			addi	r4,r4,32			; Bump up cache line offset
			blt+	attrimvl			; Do the whole page...
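/*
 * Both loops walk the page one 32-byte cache line at a time. The same walk
 * in C, as a sketch (inline asm shown for the dcbst case only; 32-byte lines
 * and a 4K page are assumed, as in the code above):
 *
 *	for (uint32_t off = 0; off < 4096; off += 32)
 *		__asm__ volatile("dcbst %0,%1" : : "b"(page), "r"(off) : "memory");
 *	__asm__ volatile("sync");		// wait for the flushes to finish
 */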
			mtmsr	r0				; Restore interrupts and translation
			isync

#if PERFTIMES && DEBUG
#endif
			blr					; Return...
/*
 * hw_pte_comm(physent) - Do something to the PTE pointing to a physical page
 *
 * Upon entry, R3 contains a pointer to a physical entry which is locked.
 * Note that this must be done with both interruptions off and VM off.
 *
 * First, we set up CRs 5 and 7 to indicate which of the 7 calls this is.
 *
 * Now we scan the mappings to invalidate any with an active PTE.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, unlock the hash list and go on to the next mapping.
 */
			.globl	EXT(hw_inv_all)

LEXT(hw_inv_all)

			li	r9,0x800			/* Indicate invalidate all */
			li	r2,0				; No inadvertent modifications please
			b	hw_pte_comm			/* Join in the fun... */

			.globl	EXT(hw_tst_mod)

LEXT(hw_tst_mod)

			lwz	r8,pepte1(r3)			; Get the saved PTE image
			li	r9,0x400			/* Indicate test modify */
			li	r2,0				; No inadvertent modifications please
			rlwinm.	r8,r8,25,31,31			; Make change bit into return code
			beq+	hw_pte_comm			; Assume we do not know if it is set...
			mr	r3,r8				; Set the return code
			blr					; Return quickly...

			.globl	EXT(hw_tst_ref)

LEXT(hw_tst_ref)

			lwz	r8,pepte1(r3)			; Get the saved PTE image
			li	r9,0x200			/* Indicate test reference bit */
			li	r2,0				; No inadvertent modifications please
			rlwinm.	r8,r8,24,31,31			; Make reference bit into return code
			beq+	hw_pte_comm			; Assume we do not know if it is set...
			mr	r3,r8				; Set the return code
			blr					; Return quickly...

/*
 * Note that the following are all in one CR for ease of use later
 */

			.globl	EXT(hw_set_mod)

LEXT(hw_set_mod)

			li	r9,0x008			/* Indicate set modify bit */
			li	r2,0x4				; Set C, clear none
			b	hw_pte_comm			/* Join in the fun... */

			.globl	EXT(hw_clr_mod)

LEXT(hw_clr_mod)

			li	r9,0x004			/* Indicate clear modify bit */
			li	r2,0x1				; Set none, clear C
			b	hw_pte_comm			/* Join in the fun... */

			.globl	EXT(hw_set_ref)

LEXT(hw_set_ref)

			li	r9,0x002			/* Indicate set reference */
			li	r2,0x8				; Set R, clear none
			b	hw_pte_comm			/* Join in the fun... */

			.globl	EXT(hw_clr_ref)

LEXT(hw_clr_ref)

			li	r9,0x001			/* Indicate clear reference bit */
			li	r2,0x2				; Set none, clear R
			b	hw_pte_comm			/* Join in the fun... */
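/*
 * The r2 encoding packs the whole request into one nybble: 0x8 = set R,
 * 0x4 = set C, 0x2 = clear R, 0x1 = clear C. The common code below peels
 * the two halves apart with rotates (see commulnl and commcng). In C, as a
 * sketch (R and C live at 0x100 and 0x80 in the low PTE word):
 *
 *	uint32_t set_mask = (r2 & 0xC) << 5;	// rlwinm r12,r2,5,23,24
 *	uint32_t clr_mask = (r2 & 0x3) << 7;	// rlwinm r11,r2,7,23,24
 *	pte_lo = (pte_lo | set_mask) & ~clr_mask;
 */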
/*
 * This is the common stuff.
 */

hw_pte_comm:					/* Common routine for pte tests and manips */

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_pte_comm
#endif

			mfsprg	r8,2				; Get feature flags
			lwz	r10,pephyslink(r3)		/* Get the first mapping block */
			mfmsr	r0				/* Save the MSR */
			rlwinm.	r10,r10,0,0,26			; Clear out the flags from first link and see if we are mapped
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r8				; Set the features
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */
			beq-	comnmap				; No mapping
			dcbt	br0,r10				; Touch the first mapping in before the isync

			bt	pfNoMSRirb,hpcNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

hpcNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it

comnmap:	mtcrf	0x05,r9				/* Set the call type flags into cr5 and 7 */

			beq-	commdone			; Nothing is mapped to this page...
			b	commnext			; Jump to first pass (jump here so we can align loop)
commnext:	lwz	r11,mmnext(r10)			; Get the pointer to the next mapping (if any)
			lwz	r7,mmPTEhash(r10)		/* Get pointer to hash list anchor */
			lwz	r5,mmPTEv(r10)			/* Get the virtual address */
			mr.	r11,r11				; More mappings to go?
			rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */
			beq-	commnxtch			; No more mappings...
			dcbt	br0,r11				; Touch the next mapping

commnxtch:	li	r12,1				/* Get the locked value */

commLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
			mr.	r11,r11				/* Is it locked? */
			bne-	commLckw1			/* Yeah... */
			stwcx.	r12,0,r7			/* Try to take it */
			bne-	commLck1			/* Someone else was trying, try again... */
			b	commSXg1			/* All done... */

commLckw1:	mr.	r11,r11				/* Check if it's already held */
			beq+	commLck1			/* It's clear... */
			lwz	r11,0(r7)			/* Get lock word again... */
			b	commLckw1			/* Wait... */

commSXg1:	isync					/* Make sure we haven't used anything yet */

			lwz	r6,mmPTEent(r10)		/* Get the pointer to the PTE now that the lock's set */

			rlwinm	r9,r5,1,0,3			/* Move in the segment */
			mr.	r6,r6				/* See if there is a PTE entry here */
			rlwinm	r8,r5,31,2,25			/* Line it up and check if empty */

			beq+	commul				/* There's no PTE to invalidate... */

			xor	r8,r8,r6			/* Back hash to virt index */
			rlwimi	r9,r5,22,4,9			/* Move in the API */
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
			rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
			rlwimi	r9,r8,6,10,19			/* Create the virtual address */

			stw	r5,0(r6)			/* Make the PTE invalid */
			mfspr	r4,pvr				/* Find out what kind of machine we are */
			sync					/* Make sure the invalid is stored */

tlbhangco:	lwarx	r11,0,r12			/* Get the TLBIE lock */
			rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
			mr.	r11,r11				/* Is it locked? */
			lis	r5,0x8000			/* Start up a bit mask */
			li	r11,1				/* Get our lock word */
			bne-	tlbhangco			/* It's locked, go wait... */
			stwcx.	r11,0,r12			/* Try to get it */
			bne-	tlbhangco			/* We was beat... */

			rlwinm	r4,r4,16,16,31			/* Isolate CPU type */
			li	r11,0				/* Lock clear value */
			cmplwi	r4,3				/* Is this a 603? */

			tlbie	r9				/* Invalidate it everywhere */

			beq-	its603co			/* It's a 603, skip the tlbsync... */

			eieio					/* Make sure that the tlbie happens first */
			tlbsync					/* Wait for everyone to catch up */

its603co:	stw	r11,0(r12)			/* Clear the lock */
			srw	r5,r5,r8			/* Make a "free slot" mask */
			sync					/* Make sure of it all */
			lwz	r6,4(r6)			/* Get the latest reference and change bits */
			lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
			stw	r11,mmPTEent(r10)		/* Clear the pointer to the PTE */
			rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
			or	r9,r9,r5			/* Set the slot free */
			rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
			rlwinm	r4,r6,0,23,24			/* Extract the RC bits */
			andc	r9,r9,r8			/* Clear the auto and lock bits */
			li	r5,pepte1			/* Get displacement to the second word of master pte */
			stw	r9,PCAallo(r7)			/* Store the allocation controls */

commmod:	lwarx	r11,r5,r3			/* Get the master copy */
			or	r11,r11,r4			/* Merge in latest RC */
			stwcx.	r11,r5,r3			/* Save it back */
			bne-	commmod				/* If it changed, try again... */
			b	commulnl			; Skip loading the old real part...

commul:		lwz	r6,mmPTEr(r10)			; Get the real part

commulnl:	rlwinm	r12,r2,5,23,24			; Get the "set" bits
			rlwinm	r11,r2,7,23,24			; Get the "clear" bits

			or	r6,r6,r12			; Set the bits to come on
			andc	r6,r6,r11			; Clear those to come off

			stw	r6,mmPTEr(r10)			; Set the new RC

			lwz	r10,mmnext(r10)			/* Get the next */
			li	r4,0				/* Make sure this is 0 */
			mr.	r10,r10				; Is there another mapping?

			sync					; Make sure that all is saved

			stw	r4,0(r7)			/* Unlock the hash chain */
			bne+	commnext			; Go get the next if there is one...

/*
 * Now that all PTEs have been invalidated and the master RC bits are updated,
 * we go ahead and figure out what the original call was and do that. Note that
 * another processor could be messing around and may have entered one of the
 * PTEs we just removed into the hash table. Too bad... You takes yer chances.
 * If there's a problem with that, it's because some higher level was trying to
 * do something with a mapping that it shouldn't. So, the problem's really
 * there, nyaaa, nyaaa, nyaaa... nyaaa, nyaaa... nyaaa! So there!
 */
commdone:	li	r5,pepte1			/* Get displacement to the second word of master pte */
			blt	cr5,commfini			/* We're finished, it was invalidate all... */
			bgt	cr5,commtst			/* It was a test modified... */
			beq	cr5,commtst			/* It was a test reference... */

/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

			rlwinm	r12,r2,5,23,24			; Get the "set" bits
			rlwinm	r11,r2,7,23,24			; Get the "clear" bits

commcng:	lwarx	r8,r5,r3			/* Get the master copy */
			or	r8,r8,r12			; Set the bits to come on
			andc	r8,r8,r11			; Clear those to come off
			stwcx.	r8,r5,r3			/* Save it back */
			bne-	commcng				/* If it changed, try again... */

			mtmsr	r0				/* Interrupts and translation back on */
			isync
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_pte_comm
#endif
			blr					/* Return... */

commtst:	lwz	r8,pepte1(r3)			/* Get the PTE */
			bne-	cr5,commtcb			; This is for the change bit...
			mtmsr	r0				; Interrupts and translation back on
			rlwinm	r3,r8,24,31,31			; Copy reference bit to bit 31
			isync					; Toss prefetching
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_pte_comm
#endif
			blr					; Return...

commtcb:	rlwinm	r3,r8,25,31,31			; Copy change bit to bit 31

commfini:	mtmsr	r0				; Interrupts and translation back on
			isync					; Toss prefetching

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_pte_comm
#endif
			blr					; Return...
/*
 * unsigned int hw_test_rc(mapping *mp, boolean_t reset);
 *
 * Test the RC bits for a specific mapping. If reset is non-zero, clear them.
 * We return the RC value in the mapping if there is no PTE or if C is set.
 * (Note: R is always set with C.) Otherwise we invalidate the PTE and
 * collect the RC bits from there, also merging them into the global copy.
 *
 * For now, we release the PTE slot and leave it invalid. In the future, we
 * may consider re-validating and not releasing the slot. It would be faster,
 * but our current implementation says that we will not have PTEs valid
 * without the reference bit set.
 *
 * We will special case C==1 && not reset to just return the RC.
 *
 * Probable state is worst performance state: C bit is off and there is a PTE.
 */
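/*
 * The decision tree, in C (sketch; invented helpers, RC sits at 0x180 in the
 * low PTE word and is returned right-justified as R=2, C=1):
 *
 *	unsigned rc = (mp->mmPTEr >> 7) & 3;	// RC from the mapping copy
 *	if ((rc & 1) && !reset)
 *		return rc;			// C already set, no reset: cheap out
 *	lock_pca(pca_of(mp));
 *	if (mp->mmPTEent != NULL) {		// live PTE: invalidate it,
 *		rc |= collect_hw_rc(mp);	// fold in what the hardware saw,
 *		if (mp->mmphysent)		// and merge into the master copy
 *			merge_rc(&mp->mmphysent->pepte1, rc);
 *	}
 *	if (reset)
 *		mp->mmPTEr &= ~0x180;		// clear RC if requested
 *	unlock_pca(pca_of(mp));
 *	return rc;
 */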
			.globl	EXT(hw_test_rc)

LEXT(hw_test_rc)

			mfsprg	r9,2				; Get feature flags
			mfmsr	r0				; Save the MSR
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			mr.	r4,r4				; See if we have a reset to do later
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Clear interruption mask
			crnot	htrReset,cr0_eq			; Remember reset
			mtcrf	0x04,r9				; Set the features
			rlwinm	r12,r12,0,28,25			; Clear IR and DR

			bt	pfNoMSRirb,htrNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

htrNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it
			lwz	r2,mmPTEr(r3)			; Get the real part
			lwz	r7,mmPTEhash(r3)		; Get pointer to hash list anchor
			rlwinm.	r12,r2,0,24,24			; Is the change bit on?
			lwz	r5,mmPTEv(r3)			; Get the virtual address
			crnor	cr0_eq,cr0_eq,htrReset		; Set if C=1 && not reset
			rlwinm	r7,r7,0,0,25			; Round hash list down to PCA boundary
			bt	cr0_eq,htrcset			; Special case changed but no reset case...

			li	r12,1				; Get the locked value

htrLck1:	lwarx	r11,0,r7			; Get the PTEG lock
			mr.	r11,r11				; Is it locked?
			bne-	htrLckw1			; Yeah...
			stwcx.	r12,0,r7			; Try to take it
			bne-	htrLck1				; Someone else was trying, try again...
			b	htrSXg1				; All done...

htrLckw1:	mr.	r11,r11				; Check if it is already held
			beq+	htrLck1				; It is clear...
			lwz	r11,0(r7)			; Get lock word again...
			b	htrLckw1			; Wait...

htrSXg1:	isync					; Make sure we have not used anything yet

			lwz	r6,mmPTEent(r3)			; Get the pointer to the PTE now that the lock is set
			lwz	r2,mmPTEr(r3)			; Get the mapping copy of the real part

			rlwinm	r9,r5,1,0,3			; Move in the segment
			mr.	r6,r6				; Any PTE to invalidate?
			rlwinm	r8,r5,31,2,25			; Line it up

			beq+	htrnopte			; There is no PTE to invalidate...

			xor	r8,r8,r6			; Back hash to virt index
			rlwimi	r9,r5,22,4,9			; Move in the API
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	; Get the TLBIE lock
			rlwinm	r5,r5,0,1,31			; Clear the valid bit
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	; Grab up the bottom part
			mfspr	r11,pvr				; Find out what kind of machine we are
			rlwimi	r9,r8,6,10,19			; Create the virtual address
			rlwinm	r11,r11,16,16,31		; Isolate CPU type

			stw	r5,0(r6)			; Make the PTE invalid
			cmplwi	cr1,r11,3			; Is this a 603?
			sync					; Make sure the invalid is stored
htrtlbhang:	lwarx	r11,0,r12			; Get the TLBIE lock
			rlwinm	r8,r6,29,29,31			; Get the bit position of entry
			mr.	r11,r11				; Is it locked?
			lis	r5,0x8000			; Start up a bit mask
			li	r11,1				; Get our lock word
			bne-	htrtlbhang			; It is locked, go wait...
			stwcx.	r11,0,r12			; Try to get it
			bne-	htrtlbhang			; We was beat...

			li	r11,0				; Lock clear value

			tlbie	r9				; Invalidate it everywhere

			beq-	cr1,htr603			; It is a 603, skip the tlbsync...

			eieio					; Make sure that the tlbie happens first
			tlbsync					; Wait for everyone to catch up

htr603:		stw	r11,0(r12)			; Clear the lock
			srw	r5,r5,r8			; Make a "free slot" mask
			sync					; Make sure of it all
			lwz	r6,4(r6)			; Get the latest reference and change bits
			stw	r11,mmPTEent(r3)		; Clear the pointer to the PTE
			rlwinm	r6,r6,0,23,24			; Extract the RC bits
			lwz	r9,PCAallo(r7)			; Get the allocation control bits
			rlwinm	r8,r5,24,8,15			; Make the autogen bit to turn off
			lwz	r10,mmphysent(r3)		; Get any physical entry
			or	r9,r9,r5			; Set the slot free
			rlwimi	r8,r8,24,16,23			; Get lock bit mask to turn it off
			andc	r9,r9,r8			; Clear the auto and lock bits
			mr.	r10,r10				; Is there a physical entry?
			li	r5,pepte1			; Get displacement to the second word of master pte
			stw	r9,PCAallo(r7)			; Store the allocation controls
			rlwimi	r2,r6,0,23,24			; Stick in RC bits
			beq-	htrnopte			; No physical entry...

htrmrc:		lwarx	r11,r5,r10			; Get the master copy
			or	r11,r11,r6			; Merge in latest RC
			stwcx.	r11,r5,r10			; Save it back
			bne-	htrmrc				; If it changed, try again...

htrnopte:	rlwinm	r5,r2,25,30,31			; Position RC and mask off
			bf	htrReset,htrnorst		; No reset to do...
			rlwinm	r2,r2,0,25,22			; Clear the RC if requested

htrnorst:	li	r4,0				; Get a 0
			stw	r2,mmPTEr(r3)			; Set the real part of the PTE

			sync					; Make sure that stuff is all stored

			stw	r4,0(r7)			; Unlock the hash chain

			mr	r3,r5				; Get the old RC to pass back
			mtmsr	r0				; Restore interrupts and translation
			isync
			blr					; Return...

htrcset:	rlwinm	r3,r2,25,30,31			; Position RC and mask off
			mtmsr	r0				; Restore interrupts and translation
			isync
			blr					; Return...
/*
 * hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - Sets the default physical page attributes
 *
 * Note that this must be done with both interruptions off and VM off.
 * Move the passed in attributes into the pte image in the phys entry.
 */

			.globl	EXT(hw_phys_attr)

LEXT(hw_phys_attr)

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Start of hw_phys_attr
#endif
			mfsprg	r9,2				; Get feature flags
			mfmsr	r0				/* Save the MSR */
			rlwinm	r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
			rlwinm	r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
			andi.	r5,r5,0x0078			/* Clean up the WIMG */
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			rlwimi	r5,r4,0,30,31			/* Move the protection into the wimg register */
			la	r6,pepte1(r3)			/* Point to the default pte */
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

			bt	pfNoMSRirb,hpaNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

hpaNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR
			sc					; Set it
atmattr:	lwarx	r10,0,r6			/* Get the pte */
			rlwimi	r10,r5,0,25,31			/* Move in the new attributes */
			stwcx.	r10,0,r6			/* Try it on for size */
			bne-	atmattr				/* Someone else was trying, try again... */

			mtmsr	r0				/* Interrupts and translation back on */
			isync
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; End of hw_phys_attr
#endif
			blr					/* All done... */
/*
 * handlePF - handle a page fault interruption
 *
 * If the fault can be handled, this routine will RFI directly,
 * otherwise it will return with all registers as in entry.
 *
 * Upon entry, state and all registers have been saved in savearea.
 * This is pointed to by R13.
 * IR and DR are off, interrupts are masked,
 * and floating point is disabled.
 * R3 is the interrupt code.
 *
 * If we bail, we must restore cr5, and all registers except 6 and
 * 13.
 */

			.globl	EXT(handlePF)

LEXT(handlePF)
1808 * This first part does a quick check to see if we can handle the fault.
1809 * We can't handle any kind of protection exceptions here, so we pass
1810 * them up to the next level.
1812 * The mapping lists are kept in MRS (most recently stolen)
1813 * order on queues anchored within from the
1814 * PTEG to which the virtual address hashes. This is further segregated by
1815 * the low-order 3 bits of the VSID XORed with the segment number and XORed
1816 * with bits 4-7 of the vaddr in an attempt to keep the searches
1819 * MRS is handled by moving the entry to the head of its list when stolen in the
1820 * assumption that it will be revalidated soon. Entries are created on the head
1821 * of the list because they will be used again almost immediately.
1823 * We need R13 set to the savearea, R3 set to the interrupt code, and R2
1824 * set to the per_proc.
1826 * NOTE: In order for a page-fault redrive to work, the translation miss
1827 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
1828 * before we come here.
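/*
 * For orientation, the primary hash built below follows the standard 32-bit
 * PowerPC scheme; in C (a simplified sketch, the real mask comes from SDR1):
 *
 *	unsigned int pteg_of(unsigned int vsid, unsigned int ea,
 *	                     unsigned int htaborg, unsigned int hashmask)
 *	{
 *		unsigned int pi   = (ea >> 12) & 0xFFFF;	// page index from the EA
 *		unsigned int hash = (vsid & 0x7FFFF) ^ pi;	// primary hash function
 *		return htaborg + ((hash & hashmask) << 6);	// each PTEG is 64 bytes
 *	}
 */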
1831 cmplwi r3,T_INSTRUCTION_ACCESS /* See if this is for the instruction */
1832 lwz r8,savesrr1(r13) ; Get the MSR to determine mode
1833 beq- gotIfetch ; We have an IFETCH here...
1835 lwz r7,savedsisr(r13) /* Get the DSISR */
1836 lwz r6,savedar(r13) /* Get the fault address */
1837 b ckIfProt ; Go check if this is a protection fault...
1839 gotIfetch: mr r7,r8 ; IFETCH info is in SRR1
1840 lwz r6,savesrr0(r13) /* Get the instruction address */
1842 ckIfProt: rlwinm. r7,r7,0,1,1 ; Is this a protection exception?
1843 beqlr- ; Yes... (probably not though)
1846 * We will need to restore registers if we bail after this point.
1847 * Note that at this point several SRs have been changed to the kernel versions.
1848 * Therefore, for these we must build these values.
1851 #if PERFTIMES && DEBUG
1856 bl EXT(dbgLog2) ; Log entry to handlePF
1861 lwz r3,PP_USERPMAP(r2) ; Get the user pmap (not needed if kernel access, but optimize for user??)
1862 rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Supervisor state access?
1863 rlwinm r5,r6,6,26,29 ; Get index to the segment slot
1864 eqv r1,r1,r1 ; Fill the bottom with foxes
1865 bne+ notsuper ; Go do the user mode interrupt stuff...
1867 cmplwi cr1,r5,SR_COPYIN_NUM*4 ; See if this is the copyin/copyout segment
1868 rlwinm r3,r6,24,8,11 ; Make the kernel VSID
1869 bne+ cr1,havevsid ; We are done if we do not want the copyin/out guy...
1871 mfsr r3,SR_COPYIN ; Get the copy vsid
1872 b havevsid ; Join up...
1876 notsuper: addi r5,r5,PMAP_SEGS ; Get offset to table
1877 lwzx r3,r3,r5 ; Get the VSID
1879 havevsid: mfspr r5,sdr1 /* Get hash table base and size */
1880 cror cr1_eq,cr0_eq,cr0_eq ; Remember if kernel fault for later
1881 rlwinm r9,r6,2,2,5 ; Move nybble 1 up to 0 (keep aligned with VSID)
1882 rlwimi r1,r5,16,0,15 /* Make table size -1 out of mask */
1883 rlwinm r3,r3,6,2,25 /* Position the space for the VSID */
1884 rlwinm r7,r6,26,10,25 /* Isolate the page index */
1885 xor r9,r9,r3 ; Splooch vaddr nybble 0 (from VSID) and 1 together
1886 or r8,r5,r1 /* Point to the last byte in table */
1887 xor r7,r7,r3 /* Get primary hash */
1888 rlwinm r3,r3,1,1,24 /* Position VSID for pte ID */
1889 addi r8,r8,1 /* Point to the PTEG Control Area */
1890 rlwinm r9,r9,8,27,29 ; Get splooched bits in place
1891 and r7,r7,r1 /* Wrap the hash */
1892 rlwimi r3,r6,10,26,31 /* Move API into pte ID */
1893 add r8,r8,r7 /* Point to our PCA entry */
1894 rlwinm r12,r3,27,27,29 ; Get low 3 bits of the VSID for look-aside hash
1895 la r11,PCAhash(r8) /* Point to the mapping hash area */
1896 xor r9,r9,r12 ; Finish splooching nybble 0, 1, and the low bits of the VSID
1900 * We have about as much as we need to start searching the autogen (aka block maps)
1901 * and mappings. From here on, any kind of failure will bail, and
1902 * contention will either bail or restart from here.
1907 li r12,1 /* Get the locked value */
1908 dcbt 0,r11 /* We'll need the hash area in a sec, so get it */
1909 add r11,r11,r9 /* Point to the right mapping hash slot */
1911 ptegLck: lwarx r10,0,r8 /* Get the PTEG lock */
1912 mr. r10,r10 /* Is it locked? */
1913 bne- ptegLckw /* Yeah... */
1914 stwcx. r12,0,r8 /* Try to take it */
1915 bne- ptegLck /* Someone else was trying, try again... */
1916 b ptegSXg /* All done... */
1920 ptegLckw: mr. r10,r10 /* Check if it's already held */
1921 beq+ ptegLck /* It's clear... */
1922 lwz r10,0(r8) /* Get lock word again... */
1923 b ptegLckw /* Wait... */
1927 nop ; Force ISYNC to last instruction in IFETCH
1931 ptegSXg: isync /* Make sure we haven't used anything yet */
1933 lwz r9,0(r11) /* Pick up first mapping block */
1934 mr r5,r11 /* Get the address of the anchor */
1935 mr r7,r9 /* Save the first in line */
1936 b findmap ; Take space and force loop to cache line
1938 findmap: mr. r12,r9 /* Are there more? */
1939 beq- tryAuto /* Nope, nothing in mapping list for us... */
1941 lwz r10,mmPTEv(r12) /* Get unique PTE identification */
1942 lwz r9,mmhashnext(r12) /* Get the chain, just in case */
1943 cmplw r10,r3 /* Did we hit our PTE? */
1944 lwz r0,mmPTEent(r12) /* Get the pointer to the hash table entry */
1945 mr r5,r12 /* Save the current as previous */
1946 bne- findmap ; Nothing here, try the next...
1948 ; Cache line boundary here
1950 cmplwi cr1,r0,0 /* Is there actually a PTE entry in the hash? */
1951 lwz r2,mmphysent(r12) /* Get the physical entry */
1952 bne- cr1,MustBeOK /* There's an entry in the hash table, so this must
1953 have been taken care of already... */
1954 lis r4,0x8000 ; Tell PTE inserter that this was not an auto
1955 cmplwi cr2,r2,0 /* Is there a physical entry? */
1956 li r0,0x0100 /* Force on the reference bit whenever we make a PTE valid */
1957 bne+ cr2,gotphys /* Skip down if we have a physical entry */
1958 li r0,0x0180 /* When there is no physical entry, force on
1959 both R and C bits to keep hardware from
1960 updating the PTE to set them. We don't
1961 keep track of RC for I/O areas, so this is ok */
1963 gotphys: lwz r2,mmPTEr(r12) ; Get the second part of the PTE
1964 b insert /* Go insert into the PTEG... */
1966 MustBeOK: li r10,0 /* Get lock clear value */
1967 li r3,T_IN_VAIN /* Say that we handled it */
1968 stw r10,PCAlock(r8) /* Clear the PTEG lock */
1970 #if PERFTIMES && DEBUG
1974 bl EXT(dbgLog2) ; Log exit from handlePF (handled in vain)
1978 blr /* Blow back and handle exception */
1983 * We couldn't find it in the mapping list. As a last try, we will
1984 * see if we can autogen it from the block mapped list.
1986 * A block mapped area is defined as a contiguous virtual area that is mapped to
1987 * a contiguous physical area. The olde-tyme IBM VM/XA Interpretive Execution
1988 * architecture referred to this as a V=F, or Virtual = Fixed area.
1990 * We consider a V=F area to be a single entity; adjacent areas can not be merged
1991 * or overlapped. The protection and memory attributes are the same and reference
1992 * and change indications are not kept. The areas are not considered part of the
1993 * physical RAM of the machine and do not have any associated physical table
1994 * entries. Their primary use is intended for mapped I/O areas (e.g., framebuffers)
1995 * although certain areas of RAM, such as the kernel V=R memory, can be mapped.
1997 * We also have a problem in the case of copyin/out: that access is done
1998 * within the kernel for a user address. Unfortunately, the user isn't
1999 * necessarily the current guy. That means that we don't have access to the
2000 * right autogen list. We can't support this kind of access. So, we need to do
2001 * a quick check here and cause a fault if there is an attempt to copyin or
2002 * copyout to any autogenned area.
2004 * The lists must be kept short.
2006 * NOTE: kernel_pmap_store must be in V=R storage!!!!!!!!!!!!!!
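/*
 * The walk that follows (bmapLck/findAuto) is, in C terms, just this sketch
 * (the struct and field names mirror the bmstart/bmend/bmnext usage below):
 *
 *	struct blokmap *find_block(struct blokmap *chain, unsigned int va)
 *	{
 *		struct blokmap *bm;
 *		for (bm = chain; bm != 0; bm = bm->bmnext)	// MRU-ordered chain
 *			if (va >= bm->bmstart && va <= bm->bmend)
 *				return bm;			// va is inside this V=F area
 *		return 0;					// no autogen possible, take the fault
 *	}
 */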
2011 tryAuto: rlwinm. r11,r3,0,5,24 ; Check if this is a kernel VSID
2012 lis r10,HIGH_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the top part of kernel block map anchor
2013 crandc cr0_eq,cr1_eq,cr0_eq ; Set if kernel access and non-zero VSID (copyin or copyout)
2014 mfsprg r11,0 ; Get the per_proc area
2015 beq- cr0,realFault ; Can not autogen for copyin/copyout...
2016 ori r10,r10,LOW_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the bottom part
2017 beq- cr1,bmInKernel ; We are in kernel... (cr1 set way back at entry)
2019 lwz r10,PP_USERPMAP(r11) ; Get the user pmap
2020 la r10,PMAP_BMAPS(r10) ; Point to the chain anchor
2021 b bmInKernel ; Jump over alignment gap...
2029 #ifndef CHIP_ERRATA_MAX_V1
2031 #endif /* CHIP_ERRATA_MAX_V1 */
2033 bmapLck: lwarx r9,0,r10 ; Get the block map anchor and lock
2034 rlwinm. r5,r9,0,31,31 ; Is it locked?
2035 ori r5,r5,1 ; Set the lock
2036 bne- bmapLckw ; Yeah...
2037 stwcx. r5,0,r10 ; Lock the bmap list
2038 bne- bmapLck ; Someone else was trying, try again...
2039 b bmapSXg ; All done...
2043 bmapLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
2044 beq+ bmapLck ; Not no more...
2045 lwz r9,0(r10) ; Get lock word again...
2046 b bmapLckw ; Check it out...
2050 nop ; Force ISYNC to last instruction in IFETCH
2054 bmapSXg: rlwinm. r4,r9,0,0,26 ; Clear out flags and lock
2055 isync ; Make sure we have not used anything yet
2056 bne+ findAuto ; We have something, let us go...
2058 bmapNone: stw r9,0(r10) ; Unlock it, we have nothing here
2059 ; No sync here because we have not changed anything
2062 * When we come here, we know that we can't handle this. Restore whatever
2063 * state that we trashed and go back to continue handling the interrupt.
2066 realFault: li r10,0 /* Get lock clear value */
2067 lwz r3,saveexception(r13) /* Figure out the exception code again */
2068 stw r10,PCAlock(r8) /* Clear the PTEG lock */
2069 #if PERFTIMES && DEBUG
2073 bl EXT(dbgLog2) ; Log exit from handlePF (passing the fault up)
2077 blr /* Blow back and handle exception */
2081 findAuto: mr. r4,r4 ; Is there more?
2082 beq- bmapNone ; No more...
2083 lwz r5,bmstart(r4) ; Get the bottom of range
2084 lwz r11,bmend(r4) ; Get the top of range
2085 cmplw cr0,r6,r5 ; Are we before the entry?
2086 cmplw cr1,r6,r11 ; Are we after the entry?
2087 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
2088 bne+ cr1,faGot ; Found it...
2090 lwz r4,bmnext(r4) ; Get the next one
2091 b findAuto ; Check it out...
2094 faGot: lwz r7,blkFlags(r4) ; Get the flags
2095 rlwinm. r7,r7,0,blkRembit,blkRembit ; is this mapping partially removed
2096 bne bmapNone ; Pending remove, bail out
2097 rlwinm r6,r6,0,0,19 ; Round to page
2098 lwz r2,bmPTEr(r4) ; Get the real part of the PTE
2099 sub r5,r6,r5 ; Get offset into area
2100 stw r9,0(r10) ; Unlock it, we are done with it (no sync needed)
2101 add r2,r2,r5 ; Adjust the real address
2103 lis r4,0x8080 /* Indicate that this was autogened */
2104 li r0,0x0180 /* Autogenned areas always set RC bits.
2105 This keeps the hardware from having
2106 to do two storage writes */
2109 * Here is where we insert the PTE into the hash. The PTE image is in R3, R2.
2110 * The PTEG allocation controls are a bit map of the state of the PTEG. The
2111 * PCAlock bits are a temporary lock for the specified PTE. PCAfree indicates that
2112 * the PTE slot is empty. PCAauto means that it comes from an autogen area. These
2113 * guys do not keep track of reference and change and are actually "wired".
2114 * They're easy to maintain. PCAsteal
2115 * is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
2116 * fields fit in a single word and are loaded and stored under control of the
2117 * PTEG control area lock (PCAlock).
2119 * Note that PCAauto does not contribute to the steal calculations at all. Originally
2120 * it did, autogens were second in priority. This can result in a pathological
2121 * case where an instruction can not make forward progress, or one PTE slot thrashes.
2124 * Physically, the fields are arranged: byte 0 = PCAfree, byte 1 = PCAauto,
2125 * byte 2 = the slot lock, byte 3 = PCAsteal.
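/*
 * The allocation below boils down to a priority-ordered find-first-bit;
 * as a C sketch (the real code assembles the priority word, R9, with
 * rotates and masks as described further down):
 *
 *	int pick_slot(unsigned int prio)	// prio = R9
 *	{
 *		if (prio == 0)
 *			return -1;		// nothing usable, bail to realFault
 *		return __builtin_clz(prio) & 7;	// cntlzw, then modulo 8 -> slot 0-7
 *	}
 */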
2131 insert: lwz r10,PCAallo(r8) /* Get the PTEG controls */
2132 eqv r6,r6,r6 /* Get all ones */
2133 mr r11,r10 /* Make a copy */
2134 rlwimi r6,r10,8,16,23 /* Insert sliding steal position */
2135 rlwimi r11,r11,24,24,31 /* Duplicate the locked field */
2136 addi r6,r6,-256 /* Form mask */
2137 rlwimi r11,r11,16,0,15 /* This gives us a quadrupled lock mask */
2138 rlwinm r5,r10,31,24,0 /* Slide over the mask for next time */
2139 mr r9,r10 /* Make a copy to test */
2140 not r11,r11 /* Invert the quadrupled lock */
2141 or r2,r2,r0 /* Force on R, and maybe C bit */
2142 and r9,r9,r11 /* Remove the locked guys */
2143 rlwimi r5,r5,8,24,24 /* Wrap bottom bit to top in mask */
2144 rlwimi r9,r11,0,16,31 /* Put two copies of the unlocked entries at the end */
2145 rlwinm r6,r6,0,16,7 ; Remove the autogens from the priority calculations
2146 rlwimi r10,r5,0,24,31 /* Move steal map back in */
2147 and r9,r9,r6 /* Set the starting point for stealing */
2149 /* So, now we have in R9:
2150 byte 0 = ~locked & free
2151 byte 1 = 0 (autogens are not candidates)
2152 byte 2 = ~locked & (PCAsteal - 1)
2153 byte 3 = ~locked
2155 Each bit position represents (modulo 8) a PTE. If it is 1, it is available for
2156 allocation at its priority level, left to right.
2158 Additionally, the PCA steal field in R10 has been rotated right one bit.
2162 rlwinm r21,r10,8,0,7 ; Isolate just the old autogen bits
2163 cntlzw r6,r9 /* Allocate a slot */
2164 mr r14,r12 /* Save our mapping for later */
2165 cmplwi r6,32 ; Was there anything available?
2166 rlwinm r7,r6,29,30,31 /* Get the priority slot we got this from */
2167 rlwinm r6,r6,0,29,31 ; Isolate bit position
2168 srw r11,r4,r6 /* Position the PTEG control bits */
2169 slw r21,r21,r6 ; Move corresponding old autogen flag to bit 0
2170 mr r22,r11 ; Get another copy of the selected slot
2172 beq- realFault /* Arghh, no slots! Take the long way 'round... */
2174 /* Remember, we've already set up the mask pattern
2175 depending upon how we got here:
2176 if we got here from a simple mapping, R4=0x80000000,
2177 if we got here from autogen it is 0x80800000. */
2179 rlwinm r6,r6,3,26,28 /* Start calculating actual PTE address */
2180 rlwimi r22,r22,24,8,15 ; Duplicate selected slot in second byte
2181 rlwinm. r11,r11,0,8,15 /* Isolate just the auto bit (remember about it too) */
2182 andc r10,r10,r22 /* Turn off the free and auto bits */
2183 add r6,r8,r6 /* Get position into PTEG control area */
2184 cmplwi cr1,r7,1 /* Set the condition based upon the old PTE type */
2185 sub r6,r6,r1 /* Switch it to the hash table */
2186 or r10,r10,r11 /* Turn auto on if it is (PTEG control all set up now) */
2187 subi r6,r6,1 /* Point right */
2188 stw r10,PCAallo(r8) /* Allocate our slot */
2189 dcbt br0,r6 ; Touch in the PTE
2190 bne wasauto /* This was autogenned... */
2192 stw r6,mmPTEent(r14) /* Link the mapping to the PTE slot */
2195 * So, now we're here and what exactly do we have? We've got:
2196 * 1) a full PTE entry, both top and bottom words in R3 and R2
2197 * 2) an allocated slot in the PTEG.
2198 * 3) R8 still points to the PTEG Control Area (PCA)
2199 * 4) R6 points to the PTE entry.
2200 * 5) R1 contains length of the hash table-1. We use this to back-translate
2201 * a PTE to a virtual address so we can invalidate TLBs.
2202 * 6) R11 has a copy of the PCA controls we set.
2203 * 7a) R7 indicates what the PTE slot was before we got to it. 0 shows
2204 * that it was empty; 2 or 3 show that
2205 * we've stolen a live one. CR1 is set to LT for empty and GT
2206 * otherwise.
2207 * 7b) Bit 0 of R21 is 1 if the stolen PTE was autogenned
2208 * 8) As for our selected PTE, it should be valid if it was stolen
2209 * and invalid if not. We could put some kind of assert here to
2210 * check, but I think that I'd rather leave it in as a mysterious,
2211 * non-reproducible bug.
2212 * 9) The new PTE's mapping has been moved to the front of its PTEG hash list
2213 * so that it's kept in some semblance of a MRU list.
2214 * 10) R14 points to the mapping we're adding.
2216 * So, what do we have to do yet?
2217 * 1) If we stole a slot, we need to invalidate the PTE completely.
2218 * 2) If we stole one AND it was not an autogen,
2219 * copy the entire old PTE (including R and C bits) to its mapping.
2220 * 3) Set the new PTE in the PTEG and make sure it is valid.
2221 * 4) Unlock the PTEG control area.
2222 * 5) Go back to the interrupt handler, changing the interrupt
2223 * code to "in vain" which will restore the registers and bail out.
2226 wasauto: oris r3,r3,0x8000 /* Turn on the valid bit */
2227 blt+ cr1,slamit /* It was empty, go slam it on in... */
2229 lwz r10,0(r6) /* Grab the top part of the PTE */
2230 rlwinm r12,r6,6,4,19 /* Match up the hash to a page boundary */
2231 rlwinm r5,r10,5,4,19 /* Extract the VSID to a page boundary */
2232 rlwinm r10,r10,0,1,31 /* Make it invalid */
2233 xor r12,r5,r12 /* Calculate vaddr */
2234 stw r10,0(r6) /* Invalidate the PTE */
2235 rlwinm r5,r10,7,27,29 ; Move nybble 0 up to subhash position
2236 rlwimi r12,r10,1,0,3 /* Move in the segment portion */
2237 lis r9,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
2238 xor r5,r5,r10 ; Splooch nybble 0 and 1
2239 rlwimi r12,r10,22,4,9 /* Move in the API */
2240 ori r9,r9,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
2241 rlwinm r4,r10,27,27,29 ; Get low 3 bits of the VSID for look-aside hash
2243 sync /* Make sure the invalid is stored */
2245 xor r4,r4,r5 ; Finish splooching nybble 0, 1, and the low bits of the VSID
2247 tlbhang: lwarx r5,0,r9 /* Get the TLBIE lock */
2249 rlwinm r4,r4,0,27,29 ; Clean up splooched hash value
2251 mr. r5,r5 /* Is it locked? */
2252 add r4,r4,r8 /* Point to the offset into the PCA area */
2253 li r5,1 /* Get our lock word */
2254 bne- tlbhang /* It's locked, go wait... */
2256 la r4,PCAhash(r4) /* Point to the start of the hash chain for the PTE we're replacing */
2258 stwcx. r5,0,r9 /* Try to get it */
2259 bne- tlbhang /* We was beat... */
2261 mfspr r7,pvr /* Find out what kind of machine we are */
2262 li r5,0 /* Lock clear value */
2263 rlwinm r7,r7,16,16,31 /* Isolate CPU type */
2265 tlbie r12 /* Invalidate it everywhere */
2267 cmplwi r7,3 /* Is this a 603? */
2268 stw r5,0(r9) /* Clear the lock */
2270 beq- its603 /* It's a 603, skip the tlbsync... */
2272 eieio /* Make sure that the tlbie happens first */
2273 tlbsync /* wait for everyone to catch up */
2276 its603: rlwinm. r21,r21,0,0,0 ; See if we just stole an autogenned entry
2277 sync /* Make sure of it all */
2279 bne slamit ; The old was an autogen, time to slam the new in...
2281 lwz r9,4(r6) /* Get the real portion of old PTE */
2282 lwz r7,0(r4) /* Get the first element. We can't get to here
2283 if we aren't working with a mapping... */
2284 mr r0,r7 ; Save pointer to first element
2286 findold: mr r1,r11 ; Save the previous guy
2287 mr. r11,r7 /* Copy and test the chain */
2288 beq- bebad /* Assume it's not zero... */
2290 lwz r5,mmPTEv(r11) /* See if this is the old active one */
2291 cmplw cr2,r11,r14 /* Check if this is actually the new one */
2292 cmplw r5,r10 /* Is this us? (Note: valid bit kept off in mappings) */
2293 lwz r7,mmhashnext(r11) /* Get the next one in line */
2294 beq- cr2,findold /* Don't count the new one... */
2295 cmplw cr2,r11,r0 ; Check if we are first on the list
2296 bne+ findold /* Not it (and assume the worst)... */
2298 lwz r12,mmphysent(r11) /* Get the pointer to the physical entry */
2299 beq- cr2,nomove ; We are first, no need to requeue...
2301 stw r11,0(r4) ; Chain us to the head
2302 stw r0,mmhashnext(r11) ; Chain the old head to us
2303 stw r7,mmhashnext(r1) ; Unlink us
2305 nomove: li r5,0 /* Clear this on out */
2307 mr. r12,r12 /* Is there a physical entry? */
2308 stw r5,mmPTEent(r11) ; Clear the PTE entry pointer
2309 li r5,pepte1 /* Point to the PTE last half */
2310 stw r9,mmPTEr(r11) ; Squirrel away the whole thing (RC bits are in here)
2312 beq- mrgmrcx ; No physical entry for this one...
2314 rlwinm r11,r9,0,23,24 /* Keep only the RC bits */
2316 mrgmrcx: lwarx r9,r5,r12 /* Get the master copy */
2317 or r9,r9,r11 /* Merge in latest RC */
2318 stwcx. r9,r5,r12 /* Save it back */
2319 bne- mrgmrcx /* If it changed, try again... */
2322 * Here's where we finish up. We save the real part of the PTE and eieio to make sure it's
2323 * out there before the top half (with the valid bit set) is stored.
2326 slamit: stw r2,4(r6) /* Stash the real part */
2327 li r4,0 /* Get a lock clear value */
2328 eieio /* Erect a barricade */
2329 stw r3,0(r6) /* Stash the virtual part and set valid on */
2331 stw r4,PCAlock(r8) /* Clear the PCA lock */
2333 li r3,T_IN_VAIN /* Say that we handled it */
2334 sync /* Go no further until the stores complete */
2335 #if PERFTIMES && DEBUG
2339 bl EXT(dbgLog2) ; Log exit from handlePF (PTE inserted)
2343 blr /* Back to the fold... */
2345 bebad: lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
2346 ori r0,r0,LOW_ADDR(Choke)
2347 sc /* Firmware Heimlich maneuver */
2350 * This walks the hash table or DBATs to locate the physical address of a virtual one.
2351 * The space is provided. If it is the kernel space, the DBATs are searched first. Failing
2352 * that, the hash table is accessed. Zero is returned for failure, so it must be special cased.
2353 * This is usually used for debugging, so we try not to rely
2354 * on anything that we don't have to.
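/*
 * Each DBAT check below is equivalent to this C sketch (standard 32-bit
 * BAT register layout; the names are ours):
 *
 *	int bat_xlate(unsigned int batu, unsigned int batl,
 *	              unsigned int va, unsigned int *pa)
 *	{
 *		unsigned int bepi  = batu & 0xFFFE0000;		// base effective address
 *		unsigned int lenm1 = (((batu >> 2) & 0x7FF) << 17) | 0x1FFFF;	// BL -> length - 1
 *
 *		if ((batu & 2) == 0) return 0;		// Vs: not valid for supervisor state
 *		if (va - bepi > lenm1) return 0;	// outside the mapped range
 *		*pa = (batl & 0xFFFE0000) + (va - bepi);	// BRPN plus the offset
 *		return 1;
 *	}
 */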
2357 ENTRY(LRA, TAG_NO_FRAME_USED)
2359 mfsprg r8,2 ; Get feature flags
2360 mfmsr r10 /* Save the current MSR */
2361 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2362 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2363 mtcrf 0x04,r8 ; Set the features
2364 xoris r5,r3,HIGH_ADDR(PPC_SID_KERNEL) /* Clear the top half if equal */
2365 andi. r9,r10,0x7FCF /* Turn off interrupts and translation */
2366 eqv r12,r12,r12 /* Fill the bottom with foxes */
2368 bt pfNoMSRirb,lraNoMSR ; No MSR...
2370 mtmsr r9 ; Translation and all off
2371 isync ; Toss prefetch
2376 li r0,loadMSR ; Get the MSR setter SC
2377 mr r3,r9 ; Get new MSR
2382 cmplwi r5,LOW_ADDR(PPC_SID_KERNEL) /* See if this is kernel space */
2383 rlwinm r11,r3,6,6,25 /* Position the space for the VSID */
2384 isync /* Purge pipe */
2385 bne- notkernsp /* This is not for the kernel... */
2387 mfspr r5,dbat0u /* Get the virtual address and length */
2388 eqv r8,r8,r8 /* Get all foxes */
2389 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2390 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2391 beq- ckbat1 /* not valid, skip this one... */
2392 sub r7,r4,r7 /* Subtract out the base */
2393 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2394 mfspr r6,dbat0l /* Get the real part */
2395 cmplw r7,r8 /* Check if it is in the range */
2396 bng+ fndbat /* Yup, she's a good un... */
2398 ckbat1: mfspr r5,dbat1u /* Get the virtual address and length */
2399 eqv r8,r8,r8 /* Get all foxes */
2400 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2401 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2402 beq- ckbat2 /* not valid, skip this one... */
2403 sub r7,r4,r7 /* Subtract out the base */
2404 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2405 mfspr r6,dbat1l /* Get the real part */
2406 cmplw r7,r8 /* Check if it is in the range */
2407 bng+ fndbat /* Yup, she's a good un... */
2409 ckbat2: mfspr r5,dbat2u /* Get the virtual address and length */
2410 eqv r8,r8,r8 /* Get all foxes */
2411 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2412 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2413 beq- ckbat3 /* not valid, skip this one... */
2414 sub r7,r4,r7 /* Subtract out the base */
2415 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2416 mfspr r6,dbat2l /* Get the real part */
2417 cmplw r7,r8 /* Check if it is in the range */
2418 bng- fndbat /* Yup, she's a good un... */
2420 ckbat3: mfspr r5,dbat3u /* Get the virtual address and length */
2421 eqv r8,r8,r8 /* Get all foxes */
2422 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2423 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2424 beq- notkernsp /* not valid, skip this one... */
2425 sub r7,r4,r7 /* Subtract out the base */
2426 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2427 mfspr r6,dbat3l /* Get the real part */
2428 cmplw r7,r8 /* Check if it is in the range */
2429 bgt+ notkernsp /* No good... */
2431 fndbat: rlwinm r6,r6,0,0,14 /* Clean up the real address */
2432 mtmsr r10 /* Restore state */
2433 add r3,r7,r6 /* Relocate the offset to real */
2434 isync /* Purge pipe */
2435 blr /* Bye, bye... */
2437 notkernsp: mfspr r5,sdr1 /* Get hash table base and size */
2438 rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */
2439 rlwimi r12,r5,16,0,15 /* Make table size -1 out of mask */
2440 rlwinm r7,r4,26,10,25 /* Isolate the page index */
2441 andc r5,r5,r12 /* Clean up the hash table */
2442 xor r7,r7,r11 /* Get primary hash */
2443 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
2444 and r7,r7,r12 /* Wrap the hash */
2445 rlwimi r11,r4,10,26,31 /* Move API into pte ID */
2446 add r5,r7,r5 /* Point to the PTEG */
2447 oris r11,r11,0x8000 /* Slam on valid bit so's we don't match an invalid one */
2449 li r9,8 /* Get the number of PTEs to check */
2450 lwz r6,0(r5) /* Preload the virtual half */
2452 fndpte: subi r9,r9,1 /* Count the pte */
2453 lwz r3,4(r5) /* Get the real half */
2454 cmplw cr1,r6,r11 /* Is this what we want? */
2455 lwz r6,8(r5) /* Start to get the next virtual half */
2456 mr. r9,r9 /* Any more to try? */
2457 addi r5,r5,8 /* Bump to next slot */
2458 beq cr1,gotxlate /* We found what we were looking for... */
2459 bne+ fndpte /* Go try the next PTE... */
2461 mtmsr r10 /* Restore state */
2462 li r3,0 /* Show failure */
2463 isync /* Purge pipe */
2466 gotxlate: mtmsr r10 /* Restore state */
2467 rlwimi r3,r4,0,20,31 /* Cram in the page displacement */
2468 isync /* Purge pipe */
2474 * struct blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr)
2476 * This is used to add a block mapping entry to the MRU list whose top
2477 * node is anchored at bmaps. This is a real address and is also used as
2478 * the lock for the list.
2480 * Overlapping areas are not allowed. If we find one, we return its address and
2481 * expect the upper layers to panic. We only check this for a debug build...
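/*
 * The overlap test used in abChk below is the usual closed-interval check;
 * in C (sketch), with [ns,ne] the new block and [os,oe] an existing one:
 *
 *	int overlaps(unsigned int ns, unsigned int ne,
 *	             unsigned int os, unsigned int oe)
 *	{
 *		return (ne >= os) && (ns <= oe);	// share at least one address
 *	}
 */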
2486 .globl EXT(hw_add_blk)
2490 mfsprg r9,2 ; Get feature flags
2491 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
2492 mfmsr r0 /* Save the MSR */
2493 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2494 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2495 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
2496 mtcrf 0x04,r9 ; Set the features
2497 xor r3,r3,r6 ; Get real address of bmap anchor
2498 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
2499 la r3,PMAP_BMAPS(r3) ; Point to bmap header
2501 bt pfNoMSRirb,habNoMSR ; No MSR...
2503 mtmsr r12 ; Translation and all off
2504 isync ; Toss prefetch
2510 li r0,loadMSR ; Get the MSR setter SC
2511 mr r3,r12 ; Get new MSR
2517 abLck: lwarx r9,0,r3 ; Get the block map anchor and lock
2518 rlwinm. r8,r9,0,31,31 ; Is it locked?
2519 ori r8,r9,1 ; Set the lock
2520 bne- abLckw ; Yeah...
2521 stwcx. r8,0,r3 ; Lock the bmap list
2522 bne- abLck ; Someone else was trying, try again...
2523 b abSXg ; All done...
2527 abLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
2528 beq+ abLck ; Not no more...
2529 lwz r9,0(r3) ; Get lock word again...
2530 b abLckw ; Check it out...
2534 nop ; Force ISYNC to last instruction in IFETCH
2537 abSXg: rlwinm r11,r9,0,0,26 ; Clear out flags and lock
2538 isync ; Make sure we have not used anything yet
2544 lwz r7,bmstart(r4) ; Get start
2545 lwz r8,bmend(r4) ; Get end
2546 mr r2,r11 ; Get chain
2548 abChk: mr. r10,r2 ; End of chain?
2549 beq abChkD ; Yes, chain is ok...
2550 lwz r5,bmstart(r10) ; Get start of current area
2551 lwz r6,bmend(r10) ; Get end of current area
2553 cmplw cr0,r8,r5 ; Is the end of the new before the start of the old?
2554 cmplw cr1,r8,r6 ; Is the end of the new after the end of the old?
2555 cmplw cr6,r6,r7 ; Is the end of the old before the start of the new?
2556 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if the new end is not within the old
2557 cmplw cr7,r6,r8 ; Is the end of the old after the end of the new?
2558 lwz r2,bmnext(r10) ; Get pointer to the next
2559 cror cr6_eq,cr6_lt,cr7_gt ; Set cr6_eq if the old end is not within the new
2560 crand cr1_eq,cr1_eq,cr6_eq ; Set cr1_eq if no overlap
2561 beq+ cr1,abChk ; Ok check the next...
2563 lwz r8,blkFlags(r10) ; Get the flags
2564 rlwinm. r8,r8,0,blkRembit,blkRembit ; Check the blkRem bit
2565 beq abRet ; Not partially removed, just return the overlap...
2566 ori r10,r10,2 ; Indicate that this block is partially removed
2568 abRet: stw r9,0(r3) ; Unlock
2569 mtmsr r0 ; Restore xlation and rupts
2570 mr r3,r10 ; Pass back the overlap
2574 abChkD: stw r11,bmnext(r4) ; Chain this on in
2575 rlwimi r4,r9,0,27,31 ; Copy in locks and flags
2576 sync ; Make sure that is done
2578 stw r4,0(r3) ; Unlock and chain the new first one
2579 mtmsr r0 ; Restore xlation and rupts
2580 li r3,0 ; Pass back a no failure return code
2586 * struct blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2588 * This is used to remove a block mapping entry from the list that
2589 * is anchored at bmaps. bmaps is a virtual address and is also used as
2590 * the lock for the list.
2592 * Note that this function clears a single block that contains
2593 * any address within the range sva to eva (inclusive). To entirely
2594 * clear any range, hw_rem_blk must be called repeatedly until it
2595 * returns 0.
2597 * The block is removed from the list and all hash table entries
2598 * corresponding to the mapped block are invalidated and the TLB
2599 * entries are purged. If the block is large, this could take
2600 * quite a while. We need to hash every possible address in the
2601 * range and lock down the PCA.
2603 * If we attempt to remove a permanent entry, we will not do it.
2604 * The block address will be ORed with 1 and returned.
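/*
 * A caller would therefore loop along these lines (C sketch; how the upper
 * layers actually react to a permanent block is up to them):
 *
 *	for (;;) {
 *		struct blokmap *blk = hw_rem_blk(pmap, sva, eva);
 *		if (blk == 0) break;			// range is clear now
 *		if ((unsigned int)blk & 1) panic("permanent block");	// not removed
 *		// low bit 2 set: partial remove still in progress, call again
 *	}
 */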
2610 .globl EXT(hw_rem_blk)
2614 mfsprg r9,2 ; Get feature flags
2615 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
2616 mfmsr r0 /* Save the MSR */
2617 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2618 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2619 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
2620 mtcrf 0x04,r9 ; Set the features
2621 xor r3,r3,r6 ; Get real address of bmap anchor
2622 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
2623 la r3,PMAP_BMAPS(r3) ; Point to the bmap chain head
2625 bt pfNoMSRirb,hrbNoMSR ; No MSR...
2627 mtmsr r12 ; Translation and all off
2628 isync ; Toss prefetch
2634 li r0,loadMSR ; Get the MSR setter SC
2635 mr r3,r12 ; Get new MSR
2641 cmp cr5,r0,r7 ; Request to invalidate the ptes
2645 rbunlink: lwz r4,bmstart(r10) ; Get start of current mapping
2646 lwz r5,bmend(r10) ; Get end of current mapping
2647 cmp cr5,r3,r3 ; Request to unlink the mapping
2649 rbLck: lwarx r9,0,r3 ; Get the block map anchor and lock
2650 rlwinm. r8,r9,0,31,31 ; Is it locked?
2651 ori r8,r9,1 ; Set the lock
2652 bne- rbLckw ; Yeah...
2653 stwcx. r8,0,r3 ; Lock the bmap list
2654 bne- rbLck ; Someone else was trying, try again...
2655 b rbSXg ; All done...
2659 rbLckw: rlwinm. r11,r9,0,31,31 ; Check if it is still held
2660 beq+ rbLck ; Not no more...
2661 lwz r9,0(r3) ; Get lock word again...
2662 b rbLckw ; Check it out...
2666 nop ; Force ISYNC to last instruction in IFETCH
2669 rbSXg: rlwinm. r2,r9,0,0,26 ; Clear out flags and lock
2670 mr r10,r3 ; Keep anchor as previous pointer
2671 isync ; Make sure we have not used anything yet
2673 beq- rbMT ; There is nothing in the list
2675 rbChk: mr r12,r10 ; Save the previous
2676 mr. r10,r2 ; End of chain?
2677 beq rbMT ; Yes, nothing to do...
2678 lwz r11,bmstart(r10) ; Get start of current area
2679 lwz r6,bmend(r10) ; Get end of current area
2681 cmplw cr0,r5,r11 ; Is the end of range before the start of the area?
2682 cmplw cr1,r4,r6 ; Is the start of range after the end of the area?
2683 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
2684 lwz r2,bmnext(r10) ; Get the next one
2685 beq+ cr1,rbChk ; Not this one, check the next...
2687 cmplw cr1,r12,r3 ; Is the current mapping the first one?
2689 bne cr5,rbblkRem ; Do we have to unchain the mapping
2691 bne cr1,rbnFirst ; Yes, is this the first mapping?
2692 rlwimi r9,r2,0,0,26 ; Yes, Change the lock value
2693 ori r2,r9,1 ; Turn on the lock bit
2695 stw r2,bmnext(r12) ; Unchain us
2701 lwz r8,blkFlags(r10) ; Get the flags
2703 rlwinm. r7,r8,0,blkPermbit,blkPermbit ; is this a permanent block?
2705 bne- rbPerm ; This is permanent, do not remove...
2707 rlwinm. r7,r8,0,blkRembit,blkRembit ; is this mapping partially removed
2709 beq rbblkRemcont ; If not, check the max size
2710 lwz r11,bmcurrent(r10) ; If yes, resume for the current page
2712 cmp cr5,r11,r6 ; No partial remove left
2713 beq cr5, rbpendret ; But there is a pending remove
2716 bne rbblkRemcont1 ; Is it the first remove
2718 oris r8,r8,hi16(blkRem) ; Yes
2719 stw r8,blkFlags(r10) ; set the blkRem bit in blkFlags
2722 lis r5,hi16(BLKREMMAX*4096) ; Load maximum size tear down
2723 ori r5,r5,lo16(BLKREMMAX*4096) ; Load maximum size tear down
2724 sub r7,r6,r11 ; Get the remaining size to tear down
2725 cmp cr5,r7,r5 ; Compare against the maximum size
2726 ble cr5,rbfullblk ; If less or equal, go remove the mapping
2728 add r7,r11,r5 ; Add the max size tear down to the current page
2729 stw r7,bmcurrent(r10) ; Update the current page
2730 subi r6,r7,1 ; Set the current end of the partial tear down
2734 stw r6,bmcurrent(r10) ; Update the current page
2737 lwz r8,bmspace(r10) ; Get the VSID
2739 stw r9,0(r3) ; Unlock and chain the new first one
2741 eqv r4,r4,r4 ; Fill the bottom with foxes
2742 mfspr r12,sdr1 ; Get hash table base and size
2743 rlwinm r8,r8,6,0,25 ; Align VSID to PTEG
2744 rlwimi r4,r12,16,0,15 ; Make table size - 1 out of mask
2745 andc r12,r12,r4 ; Clean up address of hash table
2746 rlwinm r5,r11,26,6,25 ; Rotate virtual start address into PTEG units
2747 add r12,r12,r4 ; Point to PCA - 1
2748 rlwinm r6,r6,26,6,25 ; Rotate virtual end address into PTEG units
2749 addi r12,r12,1 ; Point to PCA base
2750 sub r6,r6,r5 ; Get the total number of PTEGs to clear
2751 cmplw r6,r4 ; See if this wraps all the way around
2752 blt rbHash ; Nope, length is right
2753 subi r6,r4,32+31 ; Back down to correct length
2755 rbHash: rlwinm r5,r5,0,10,25 ; Keep only the page index
2756 xor r2,r8,r5 ; Hash into table
2757 and r2,r2,r4 ; Wrap into the table
2758 add r2,r2,r12 ; Point right at the PCA
2760 rbLcka: lwarx r7,0,r2 ; Get the PTEG lock
2761 mr. r7,r7 ; Is it locked?
2762 bne- rbLckwa ; Yeah...
2763 li r7,1 ; Get the locked value
2764 stwcx. r7,0,r2 ; Take it
2765 bne- rbLcka ; Someone else was trying, try again...
2766 b rbSXga ; All done...
2768 rbLckwa: mr. r7,r7 ; Check if it is already held
2769 beq+ rbLcka ; It is clear...
2770 lwz r7,0(r2) ; Get lock word again...
2773 rbSXga: isync ; Make sure nothing used yet
2774 lwz r7,PCAallo(r2) ; Get the allocation word
2775 rlwinm. r11,r7,8,0,7 ; Isolate the autogenerated PTEs
2776 or r7,r7,r11 ; Release the autogen slots
2777 beq+ rbAintNone ; There are not any here
2778 mtcrf 0xC0,r11 ; Set the branch masks for autogens
2779 sub r11,r2,r4 ; Move back to the hash table + 1
2780 rlwinm r7,r7,0,16,7 ; Clear the autogen field
2781 subi r11,r11,1 ; Point to the PTEG
2782 stw r7,PCAallo(r2) ; Update the flags
2783 li r7,0 ; Get an invalid PTE value
2785 bf 0,rbSlot1 ; No autogen here
2786 stw r7,0x00(r11) ; Invalidate PTE
2787 rbSlot1: bf 1,rbSlot2 ; No autogen here
2788 stw r7,0x08(r11) ; Invalidate PTE
2789 rbSlot2: bf 2,rbSlot3 ; No autogen here
2790 stw r7,0x10(r11) ; Invalidate PTE
2791 rbSlot3: bf 3,rbSlot4 ; No autogen here
2792 stw r7,0x18(r11) ; Invalidate PTE
2793 rbSlot4: bf 4,rbSlot5 ; No autogen here
2794 stw r7,0x20(r11) ; Invalidate PTE
2795 rbSlot5: bf 5,rbSlot6 ; No autogen here
2796 stw r7,0x28(r11) ; Invalidate PTE
2797 rbSlot6: bf 6,rbSlot7 ; No autogen here
2798 stw r7,0x30(r11) ; Invalidate PTE
2799 rbSlot7: bf 7,rbSlotx ; No autogen here
2800 stw r7,0x38(r11) ; Invalidate PTE
2803 rbAintNone: li r7,0 ; Clear this out
2804 sync ; To make SMP happy
2805 addic. r6,r6,-64 ; Decrement the count
2806 stw r7,PCAlock(r2) ; Release the PTEG lock
2807 addi r5,r5,64 ; Move up by adjusted page number
2808 bge+ rbHash ; Not done...
2810 sync ; Make sure the memory is quiet
2813 ; Here we take the easy way out and just purge the entire TLB. This is
2814 ; certainly faster and definitely easier than blasting just the correct ones
2815 ; in the range, since we only need one lock and one TLBSYNC. We would hope
2816 ; that most blocks are more than 64 pages (256K) and that on every machine
2817 ; up to Book E, 64 TLBIEs will invalidate the entire table.
2820 li r5,64 ; Get number of TLB entries to purge
2821 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock
2822 li r6,0 ; Start at 0
2823 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part
2825 rbTlbL: lwarx r2,0,r12 ; Get the TLBIE lock
2826 mr. r2,r2 ; Is it locked?
2827 li r2,1 ; Get our lock value
2828 bne- rbTlbL ; It is locked, go wait...
2829 stwcx. r2,0,r12 ; Try to get it
2830 bne- rbTlbL ; We was beat...
2832 rbTlbN: addic. r5,r5,-1 ; See if we did them all
2833 tlbie r6 ; Invalidate it everywhere
2834 addi r6,r6,0x1000 ; Up to the next page
2835 bgt+ rbTlbN ; Make sure we have done it all...
2837 mfspr r5,pvr ; Find out what kind of machine we are
2838 li r2,0 ; Lock clear value
2840 rlwinm r5,r5,16,16,31 ; Isolate CPU type
2841 cmplwi r5,3 ; Is this a 603?
2842 sync ; Make sure all is quiet
2843 beq- rbits603a ; It is a 603, skip the tlbsync...
2845 eieio ; Make sure that the tlbie happens first
2846 tlbsync ; wait for everyone to catch up
2849 rbits603a: sync ; Wait for quiet again
2850 stw r2,0(r12) ; Unlock invalidates
2852 sync ; Make sure that is done
2854 ble cr5,rbunlink ; If all PTEs are flushed, go unlink the mapping
2855 mtmsr r0 ; Restore xlation and rupts
2856 mr r3,r10 ; Pass back the removed block in progress
2857 ori r3,r3,2 ; Indicate that the block remove isn't completed yet
2862 stw r9,0(r3) ; Unlock
2863 mtmsr r0 ; Restore xlation and rupts
2864 mr r3,r10 ; Pass back the removed block in progress
2865 ori r3,r3,2 ; Indicate that the block remove isn't completed yet
2870 rbMT: stw r9,0(r3) ; Unlock
2871 mtmsr r0 ; Restore xlation and rupts
2872 li r3,0 ; Say we did not find one
2876 rbPerm: stw r9,0(r3) ; Unlock
2877 mtmsr r0 ; Restore xlation and rupts
2878 ori r3,r10,1 ; Say we did not remove it
2882 rbDone: stw r9,0(r3) ; Unlock
2883 mtmsr r0 ; Restore xlation and rupts
2884 mr r3,r10 ; Pass back the removed block
2889 * hw_select_mappings(struct mappingflush *mappingflush)
2892 * Output: up to 8 user mappings
2894 * hw_select_mappings() scans every PCA mapping hash list and selects
2895 * the last user mapping on each, if one exists.
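/*
 * In outline (C sketch; is_kernel_vsid and record are illustrative names):
 *
 *	struct mapping *mp, *last;
 *	int i;
 *	for (i = 0; i < 8; i++) {			// one mapping hash list per PCA
 *		last = 0;
 *		for (mp = pca->PCAhash[i]; mp; mp = mp->mmhashnext)
 *			if (!is_kernel_vsid(mp->mmPTEv))	// skip kernel VSIDs
 *				last = mp;		// the latest user mapping wins
 *		if (last)
 *			record(last);			// at most 8 mappings reported
 *	}
 */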
2900 .globl EXT(hw_select_mappings)
2902 LEXT(hw_select_mappings)
2903 mr r5,r3 ; Get the mapping flush addr
2904 mfmsr r12 ; Get the MSR
2905 rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2906 rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2907 mfsprg r9,2 ; Get feature flags
2908 andi. r0,r12,0x7FCF ; Disable translation and interruptions
2909 mtcrf 0x04,r9 ; Set the features
2910 bt pfNoMSRirb,hvmNoMSR ; No MSR...
2915 mr r3,r0 ; Get the new MSR
2916 li r0,loadMSR ; Get the MSR setter SC
2920 li r11,1 ; Get the locked value
2923 lwz r3,MFpcaptr(r5) ; Get the PCA pointer
2924 lwarx r10,0,r3 ; Get the PTEG lock
2925 mr. r10,r10 ; Is it locked?
2926 bne- hvmptegLckwx ; Yeah...
2927 stwcx. r11,0,r3 ; Try to take it
2928 bne- hvmptegLckx ; Someone else was trying, try again...
2929 b hvmptegSXgx ; All done...
2934 mr. r10,r10 ; Check if it is already held
2935 beq+ hvmptegLckx ; It's clear...
2936 lwz r10,0(r3) ; Get lock word again...
2937 b hvmptegLckwx ; Wait...
2942 isync ; Make sure we haven't used anything yet
2944 li r11,8 ; set count to 8
2946 lwz r6,PCAhash(r3) ; load the first mapping hash list
2947 la r12,PCAhash(r3) ; Point to the mapping hash area
2948 la r4,MFmapping(r5) ; Point to the mapping flush mapping area
2950 stw r7,MFmappingcnt(r5) ; Set the current count to 0
2952 hvmnexthash: li r10,0 ; Mapping test
2955 hvmfindmap: mr. r6,r6 ; Test if the hash list current pointer is zero
2956 beq hvmfindmapret ; Did we hit the end of the hash list
2957 lwz r7,mmPTEv(r6) ; Pick up our virtual ID
2958 rlwinm r8,r7,5,0,19 ; Pick VSID 20 lower bits
2960 beq hvmfindmapnext ; Skip Kernel VSIDs
2961 rlwinm r8,r7,1,0,3 ; Extract the Segment index
2962 rlwinm r9,r7,22,4,9 ; Extract API 6 upper bits
2963 or r8,r8,r9 ; Add to the virtual address
2964 rlwinm r9,r7,31,6,25 ; Pick VSID 19 lower bits
2965 xor r9,r9,r3 ; Exclusive or with the PCA address
2966 rlwinm r9,r9,6,10,19 ; Extract API 10 lower bits
2967 or r8,r8,r9 ; Add to the virtual address
2969 stw r8,4(r4) ; Store the virtual address
2970 lwz r8,mmpmap(r6) ; Get the pmap
2971 stw r8,0(r4) ; Store the pmap
2972 li r10,1 ; Found one
2975 hvmfindmapnext: lwz r6,mmhashnext(r6) ; Pick up next mapping block
2976 b hvmfindmap ; Scan the next mapping
2978 hvmfindmapret: mr. r10,r10 ; Did we find a mapping?
2979 beq hvmnexthashprep ; If not, do not update the mappingflush array
2980 lwz r7,MFmappingcnt(r5) ; Get the current count
2981 addi r7,r7,1 ; Increment the current count
2982 stw r7,MFmappingcnt(r5) ; Store the current count
2983 addi r4,r4,MFmappingSize ; Point to the next mapping flush entry
2985 hvmnexthashprep: addi r12,r12,4 ; Load the next hash list
2986 lwz r6,0(r12) ; Load the next hash list entry
2987 subi r11,r11,1 ; Decrement hash list index
2988 mr. r11,r11 ; Test for a remaining hash list
2989 bne hvmnexthash ; Loop to scan the next hash list
2992 stw r10,0(r3) ; Unlock the hash list
2993 mtmsr r0 ; Restore translation and interruptions
2998 * vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va)
3000 * This is used to translate a virtual address within a block mapping entry
3001 * to a physical address. If not found, 0 is returned.
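/*
 * Once the covering block is found, the translation is plain arithmetic;
 * in C (sketch using the bmstart/bmPTEr fields referenced below):
 *
 *	pa = (bm->bmPTEr & 0xFFFFF000)		// physical page base from the PTE image
 *	   + (va - bm->bmstart);		// plus the offset into the block
 */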
3006 .globl EXT(hw_cvp_blk)
3010 mfsprg r9,2 ; Get feature flags
3011 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
3012 mfmsr r0 /* Save the MSR */
3013 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3014 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3015 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
3016 mtcrf 0x04,r9 ; Set the features
3017 xor r3,r3,r6 ; Get real address of bmap anchor
3018 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
3019 la r3,PMAP_BMAPS(r3) ; Point to chain header
3021 bt pfNoMSRirb,hcbNoMSR ; No MSR...
3023 mtmsr r12 ; Translation and all off
3024 isync ; Toss prefetch
3030 li r0,loadMSR ; Get the MSR setter SC
3031 mr r3,r12 ; Get new MSR
3037 cbLck: lwarx r9,0,r3 ; Get the block map anchor and lock
3038 rlwinm. r8,r9,0,31,31 ; Is it locked?
3039 ori r8,r9,1 ; Set the lock
3040 bne- cbLckw ; Yeah...
3041 stwcx. r8,0,r3 ; Lock the bmap list
3042 bne- cbLck ; Someone else was trying, try again...
3043 b cbSXg ; All done...
3047 cbLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
3048 beq+ cbLck ; Not no more...
3049 lwz r9,0(r3) ; Get lock word again...
3050 b cbLckw ; Check it out...
3054 nop ; Force ISYNC to last instruction in IFETCH
3060 cbSXg: rlwinm. r11,r9,0,0,26 ; Clear out flags and lock
3061 li r2,0 ; Assume we do not find anything
3062 isync ; Make sure we have not used anything yet
3064 cbChk: mr. r11,r11 ; Is there more?
3065 beq- cbDone ; No more...
3066 lwz r5,bmstart(r11) ; Get the bottom of range
3067 lwz r12,bmend(r11) ; Get the top of range
3068 cmplw cr0,r4,r5 ; Are we before the entry?
3069 cmplw cr1,r4,r12 ; Are we after the entry?
3070 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
3071 beq- cr1,cbNo ; We are not in the range...
3073 lwz r2,bmPTEr(r11) ; Get the real part of the PTE
3074 sub r5,r4,r5 ; Get offset into area
3075 rlwinm r2,r2,0,0,19 ; Clean out everything but the page
3076 add r2,r2,r5 ; Adjust the real address
3078 cbDone: stw r9,0(r3) ; Unlock it, we are done with it (no sync needed)
3079 mtmsr r0 ; Restore translation and interrupts...
3080 isync ; Make sure it is on
3081 mr r3,r2 ; Set return physical address
3086 cbNo: lwz r11,bmnext(r11) ; Link next
3087 b cbChk ; Check it out...
3091 * hw_set_user_space(pmap)
3092 * hw_set_user_space_dis(pmap)
3094 * Indicate whether memory space needs to be switched.
3095 * We really need to turn off interrupts here, because we need to be non-preemptable.
3097 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
3098 * register usage here. The VMM switch code in vmachmon.s that calls this
3099 * knows what registers are in use. Check that code if these change.
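/*
 * Functionally, both entry points reduce to this C sketch (the
 * per-processor field names here are illustrative):
 *
 *	s = disable_interrupts();		// not needed for the _dis variant
 *	per_proc->userspace = pmap->space;	// new address space ID
 *	per_proc->userpmap  = pmap_real;	// real address of the pmap
 *	restore_interrupts(s);
 */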
3105 .globl EXT(hw_set_user_space)
3107 LEXT(hw_set_user_space)
3109 mfmsr r10 /* Get the current MSR */
3110 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3111 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3112 rlwinm r9,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off 'rupts */
3113 mtmsr r9 /* Disable 'em */
3114 lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation
3115 lwz r4,PMAP_SPACE(r3) ; Get the space
3116 mfsprg r6,0 /* Get the per_proc_info address */
3117 xor r3,r3,r7 ; Get real address of the pmap
3118 stw r4,PP_USERSPACE(r6) /* Show our new address space */
3119 stw r3,PP_USERPMAP(r6) ; Show our real pmap address
3120 mtmsr r10 /* Restore interruptions */
3124 .globl EXT(hw_set_user_space_dis)
3126 LEXT(hw_set_user_space_dis)
3128 lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation
3129 lwz r4,PMAP_SPACE(r3) ; Get the space
3130 mfsprg r6,0 ; Get the per_proc_info address
3131 xor r3,r3,r7 ; Get real address of the pmap
3132 stw r4,PP_USERSPACE(r6) ; Show our new address space
3133 stw r3,PP_USERPMAP(r6) ; Show our real pmap address
3137 /* struct mapping *hw_cpv(struct mapping *mp) - Converts a physical mapping CB address to virtual
3146 rlwinm. r4,r3,0,0,19 ; Round back to the mapping block allocation control block
3147 mfmsr r10 ; Get the current MSR
3148 beq- hcpvret ; Skip if we are passed a 0...
3149 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3150 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3151 andi. r9,r10,0x7FEF ; Turn off interrupts and data translation
3152 mtmsr r9 ; Disable DR and EE
3155 lwz r4,mbvrswap(r4) ; Get the conversion value
3156 mtmsr r10 ; Interrupts and DR back on
3158 xor r3,r3,r4 ; Convert to physical
3160 hcpvret: rlwinm r3,r3,0,0,26 ; Clean out any flags
3164 /* struct mapping *hw_cvp(struct mapping *mp) - Converts a virtual mapping CB address to physical
3166 * Translation must be on for this
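/*
 * Both hw_cpv and hw_cvp are the same XOR trick; in C (a sketch):
 *
 *	struct mapping *cvt(struct mapping *mp)
 *	{
 *		struct mappingblok *mb = (struct mappingblok *)((unsigned int)mp & 0xFFFFF000);
 *		return (struct mapping *)(((unsigned int)mp & ~0x1Fu) ^ mb->mbvrswap);
 *	}
 *
 * mbvrswap holds (virtual_base ^ real_base) for the page-aligned allocation
 * block, so one stored value converts an address in either direction.
 */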
3175 rlwinm r4,r3,0,0,19 ; Round back to the mapping block allocation control block
3176 rlwinm r3,r3,0,0,26 ; Clean out any flags
3177 lwz r4,mbvrswap(r4) ; Get the conversion value
3178 xor r3,r3,r4 ; Convert to virtual
3182 /* int mapalc(struct mappingblok *mb) - Finds, allocates, and checks a free mapping entry in a block
3184 * Lock must already be held on mapping block list
3185 * returns 0 if all slots are filled.
3186 * returns n if a slot is found and it is not the last
3187 * returns -n if a slot is found and it is the last
3188 * when n or -n is returned, the corresponding bit is cleared
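/*
 * In C, mapalc is a find-first-set over the 128-bit free mask (sketch):
 *
 *	int mapalc_sketch(unsigned int mbfree[4])
 *	{
 *		int w, n;
 *		for (w = 0; w < 4; w++) {
 *			if (mbfree[w] == 0) continue;		// word fully allocated
 *			n = __builtin_clz(mbfree[w]);		// cntlzw: first free bit
 *			mbfree[w] &= ~(0x80000000u >> n);	// allocate the slot
 *			n += 32 * w;				// absolute slot number
 *			if (mbfree[0] | mbfree[1] | mbfree[2] | mbfree[3])
 *				return n;			// more slots remain
 *			return -n;				// that was the last one
 *		}
 *		return 0;					// all slots were filled
 *	}
 */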
3197 lwz r4,mbfree(r3) ; Get the first mask
3198 lis r0,0x8000 ; Get the mask to clear the first free bit
3199 lwz r5,mbfree+4(r3) ; Get the second mask
3200 mr r12,r3 ; Save the return
3201 cntlzw r8,r4 ; Get first free field
3202 lwz r6,mbfree+8(r3) ; Get the third mask
3203 srw. r9,r0,r8 ; Get bit corresponding to first free one
3204 lwz r7,mbfree+12(r3) ; Get the fourth mask
3205 cntlzw r10,r5 ; Get first free field in second word
3206 andc r4,r4,r9 ; Turn it off
3207 bne malcfnd0 ; Found one...
3209 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
3210 cntlzw r11,r6 ; Get first free field in third word
3211 andc r5,r5,r9 ; Turn it off
3212 bne malcfnd1 ; Found one...
3214 srw. r9,r0,r11 ; Get bit corresponding to first free one in third word
3215 cntlzw r10,r7 ; Get first free field in fourth word
3216 andc r6,r6,r9 ; Turn it off
3217 bne malcfnd2 ; Found one...
3219 srw. r9,r0,r10 ; Get bit corresponding to first free one in fourth word
3220 li r3,0 ; Assume abject failure
3221 andc r7,r7,r9 ; Turn it off
3222 beqlr ; There are none any left...
3224 addi r3,r10,96 ; Set the correct bit number
3225 stw r7,mbfree+12(r12) ; Actually allocate the slot
3227 mapafin: or r4,r4,r5 ; Merge the first two allocation maps
3228 or r6,r6,r7 ; Then the last two
3229 or. r4,r4,r6 ; Merge both halves
3230 bnelr+ ; Return if some left for next time...
3232 neg r3,r3 ; Indicate we just allocated the last one
3235 malcfnd0: stw r4,mbfree(r12) ; Actually allocate the slot
3236 mr r3,r8 ; Set the correct bit number
3237 b mapafin ; Exit now...
3239 malcfnd1: stw r5,mbfree+4(r12) ; Actually allocate the slot
3240 addi r3,r10,32 ; Set the correct bit number
3241 b mapafin ; Exit now...
3243 malcfnd2: stw r6,mbfree+8(r12) ; Actually allocate the slot
3244 addi r3,r11,64 ; Set the correct bit number
3245 b mapafin ; Exit now...
3249 * Log out all memory usage
3257 mfmsr r2 ; Get the MSR
3258 lis r10,hi16(EXT(DebugWork)) ; High part of area
3259 rlwinm r2,r2,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3260 lis r12,hi16(EXT(mem_actual)) ; High part of actual
3261 rlwinm r2,r2,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3262 andi. r0,r2,0x7FCF ; Interrupts and translation off
3263 ori r10,r10,lo16(EXT(DebugWork)) ; Get the entry
3264 mtmsr r0 ; Turn stuff off
3265 ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual
3270 stw r0,4(r10) ; Force logging off
3271 lwz r0,0(r12) ; Get the end of memory
3273 lis r12,hi16(EXT(mem_size)) ; High part of defined memory
3274 ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory
3275 lwz r12,0(r12) ; Make it end of defined
3277 cmplw r0,r12 ; Is there room for the data?
3278 ble- logmemexit ; No, do not even try...
3280 stw r12,0(r12) ; Set defined memory size
3281 stw r0,4(r12) ; Set the actual amount of memory
3283 lis r3,hi16(EXT(hash_table_base)) ; Hash table address
3284 lis r4,hi16(EXT(hash_table_size)) ; Hash table size
3285 lis r5,hi16(EXT(pmap_mem_regions)) ; Memory regions
3286 lis r6,hi16(EXT(mapCtl)) ; Mappings
3287 ori r3,r3,lo16(EXT(hash_table_base))
3288 ori r4,r4,lo16(EXT(hash_table_size))
3289 ori r5,r5,lo16(EXT(pmap_mem_regions))
3290 ori r6,r6,lo16(EXT(mapCtl))
3293 lwz r5,4(r5) ; Get the pointer to the phys_ent table
3294 lwz r6,0(r6) ; Get the pointer to the current mapping block
3295 stw r3,8(r12) ; Save the hash table address
3296 stw r4,12(r12) ; Save the hash table size
3297 stw r5,16(r12) ; Save the physent pointer
3298 stw r6,20(r12) ; Save the mappings
3300 addi r11,r12,0x1000 ; Point to area to move hash table and PCA
3302 add r4,r4,r4 ; Double size for both
3304 copyhash: lwz r7,0(r3) ; Copy both of them
3317 rlwinm r4,r12,20,12,31 ; Get number of phys_ents
3319 copyphys: lwz r7,0(r5) ; Copy physents
3328 addi r11,r11,4095 ; Round up to next page
3329 rlwinm r11,r11,0,0,19
3331 lwz r4,4(r6) ; Get the size of the mapping area
3333 copymaps: lwz r7,0(r6) ; Copy the mappings
3346 sub r11,r11,r12 ; Get the total length we saved
3347 stw r11,24(r12) ; Save the size
3349 logmemexit: mtmsr r2 ; Back to normal