/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <db_machine_commands.h>

#include <mach_debug.h>

#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/pmap_internals.h>
#include <mach/ppc/vm_param.h>
/*
 * Random notes and musings...
 *
 * Access to mappings via the PTEG hash must be done with the list locked.
 * Access via the physical entries is controlled by the physent lock.
 * Access to mappings is controlled by the PTEG lock once they are queued.
 * If they are not on the list, they don't really exist, so
 * only one processor at a time can find them, so no access control is needed.
 *
 * The second half of the PTE is kept in the physical entry. It is done this
 * way, because there may be multiple mappings that refer to the same physical
 * page (i.e., address aliases or synonyms). We must do it this way, because
 * maintenance of the reference and change bits becomes nightmarish if each mapping
 * has its own. One side effect of this, and not necessarily a bad one, is that
 * all mappings for a single page can have a single WIMG, protection state, and RC bits.
 * The only "bad" thing is the reference bit. With a single copy, we cannot get
 * a completely accurate working set calculation, i.e., we can't tell which mapping was
 * used to reference the page; all we can tell is that the physical page was
 * referenced.
 *
 * The master copies of the reference and change bits are kept in the phys_entry.
 * Other than the reference and change bits, changes to the phys_entry are not
 * allowed if it has any mappings. The master reference and change bits must be
 * changed via atomic update.
 *
 * Invalidating a PTE merges the RC bits into the phys_entry.
 *
 * Before checking the reference and/or change bits, ALL mappings to the
 * physical page are invalidated.
 *
 * PTEs are never explicitly validated, they are always faulted in. They are also
 * not visible outside of the hw_vm modules. Complete separation of church and state.
 *
 * Removal of a mapping invalidates its PTE.
 *
 * So, how do we deal with mappings to I/O space? We don't have a physent for it.
 * Within the mapping is a copy of the second half of the PTE. This is used
 * ONLY when there is no physical entry. It is swapped into the PTE whenever
 * it is built. There is no need to swap it back out, because RC is not
 * maintained for these mappings.
 *
 * So, I'm starting to get concerned about the number of lwarx/stwcx. loops in
 * this. Satisfying a mapped address with no stealing requires one lock. If we
 * steal an entry, there are two locks and an atomic update. Invalidation of an entry
 * takes one lock and, if there is a PTE, another lock and an atomic update. Other
 * operations are multiples (per mapping) of the above. Maybe we should look for
 * an alternative. So far, I haven't found one, but I haven't looked hard.
 */
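/*
 * To make the aliasing scheme above concrete, here is a rough C sketch of the
 * relationship between mappings and the physical entry. The struct layouts and
 * field names are illustrative only (hypothetical, not the real assym offsets);
 * the point is that every mapping of a page shares one master copy of the RC
 * bits, which is only ever touched with an atomic read-modify-write.
 *
 *	#include <stdint.h>
 *	#include <stdatomic.h>
 *
 *	struct mapping {
 *		struct mapping *hashnext;   // chain through the PTEG hash bucket
 *		struct mapping *next;       // chain through the phys_entry list
 *		uint32_t        pte_v;      // word 0 of the PTE (virtual ID)
 *		uint32_t        pte_r;      // local copy of word 1 (used for I/O only)
 *		struct phys_entry *physent; // NULL for I/O space mappings
 *	};
 *
 *	struct phys_entry {
 *		struct mapping *phys_link;  // all mappings of this page
 *		_Atomic uint32_t pte1;      // master copy of PTE word 1 (incl. RC)
 *	};
 *
 *	// Merge the RC bits of an invalidated PTE into the master copy.
 *	// This mirrors the lwarx/stwcx. merge loops in the code below.
 *	static void merge_rc(struct phys_entry *pp, uint32_t rc_bits)
 *	{
 *		atomic_fetch_or(&pp->pte1, rc_bits & 0x180); // R=0x100, C=0x80
 *	}
 */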
/*
 * hw_add_map(struct mapping *mp, space_t space, vm_offset_t va) - Adds a mapping
 *
 * Adds a mapping to the PTEG hash list.
 *
 * Interrupts must be disabled before calling.
 *
 * Using the space and the virtual address, we hash into the hash table
 * and get a lock on the PTEG hash chain. Then we chain the
 * mapping to the front of the list.
 */
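/*
 * A rough C sketch of the hashing done in the prologue below, assuming the
 * classic 32-bit PowerPC HTAB layout (names hypothetical, masks simplified).
 * The VSID is formed from the space ID and the segment number (top nybble of
 * the vaddr); the primary hash is the XOR of the VSID with the page index,
 * wrapped to the table size taken from SDR1.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t primary_hash(uint32_t space, uint32_t va,
 *	                             uint32_t htab_mask)
 *	{
 *		uint32_t vsid  = (space << 4) | (va >> 28); // space + segment no.
 *		uint32_t pgidx = (va >> 12) & 0xFFFF;       // page index in segment
 *		return (vsid ^ pgidx) & htab_mask;          // wrap; selects the PCA
 *	}
 */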
		.globl	EXT(hw_add_map)

LEXT(hw_add_map)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_add_map
#endif
		mfmsr	r0				/* Get the MSR */
		eqv	r6,r6,r6			/* Fill the bottom with foxes */
		rlwinm	r11,r4,6,6,25			/* Position the space for the VSID */
		mfspr	r10,sdr1			/* Get hash table base and size */
		rlwimi	r11,r5,30,2,5			/* Insert the segment no. to make a VSID */
		mfsprg	r12,2				; Get feature flags
		rlwimi	r6,r10,16,0,15			/* Make table size -1 out of mask */
		rlwinm	r7,r5,26,10,25			/* Isolate the page index */
		or	r8,r10,r6			/* Point to the last byte in table */
		rlwinm	r9,r5,4,0,3			; Move nybble 1 up to 0
		xor	r7,r7,r11			/* Get primary hash */
		mtcrf	0x04,r12			; Set the features
		andi.	r12,r0,0x7FCF			/* Disable translation and interruptions */
		rlwinm	r11,r11,1,1,24			/* Position VSID for pte ID */
		addi	r8,r8,1				/* Point to the PTEG Control Area */
		xor	r9,r9,r5			; Splooch vaddr nybble 0 and 1 together
		and	r7,r7,r6			/* Wrap the hash */
		rlwimi	r11,r5,10,26,31			/* Move API into pte ID */
		rlwinm	r9,r9,6,27,29			; Get splooched bits in place
		add	r8,r8,r7			/* Point to our PCA entry */
		rlwinm	r10,r4,2,27,29			; Get low 3 bits of the VSID for look-aside hash
		bt	pfNoMSRirb,hamNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	hamNoMSRx			; Skip the trap version...

hamNoMSR:	mr	r4,r0				; Save R0
		mr	r2,r3				; Save R3
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r2				; Restore R3
		mr	r0,r4				; Restore R0

hamNoMSRx:
		la	r4,PCAhash(r8)			/* Point to the mapping hash area */
		xor	r9,r9,r10			; Finish splooching nybble 0, 1, and the low bits of the VSID
		isync					/* Get rid of anything prefetched before we ref storage */
/*
 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
 * and word 0 of the PTE (the virtual part).
 *
 * Now, we just lock the PCA.
 */
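/*
 * The PCA lock taken below is a simple spin lock built out of lwarx/stwcx.
 * A minimal C sketch of the same protocol (GCC-style builtins assumed; like
 * ptegLckwx below, it spins on plain loads so it doesn't hammer the
 * reservation while the lock is held):
 *
 *	static void pca_lock(volatile uint32_t *lock)
 *	{
 *		for (;;) {
 *			if (*lock == 0 &&
 *			    __sync_bool_compare_and_swap(lock, 0, 1))
 *				return;             // got it
 *			while (*lock != 0)          // wait on plain loads
 *				continue;
 *		}
 *	}
 *
 *	static void pca_unlock(volatile uint32_t *lock)
 *	{
 *		__sync_synchronize();               // sync before release
 *		*lock = 0;
 *	}
 */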
		li	r12,1				/* Get the locked value */
		dcbt	0,r4				/* We'll need the hash area in a sec, so get it */
		add	r4,r4,r9			/* Point to the right mapping hash slot */

ptegLckx:	lwarx	r10,0,r8			/* Get the PTEG lock */
		mr.	r10,r10				/* Is it locked? */
		bne-	ptegLckwx			/* Yeah... */
		stwcx.	r12,0,r8			/* Try to take it */
		bne-	ptegLckx			/* Someone else was trying, try again... */
		b	ptegSXgx			/* All done... */

ptegLckwx:	mr.	r10,r10				/* Check if it's already held */
		beq+	ptegLckx			/* It's clear... */
		lwz	r10,0(r8)			/* Get lock word again... */
		b	ptegLckwx			/* Wait... */

ptegSXgx:	isync					/* Make sure we haven't used anything yet */

		lwz	r7,0(r4)			/* Pick up the anchor of hash list */
		stw	r3,0(r4)			/* Save the new head */
		stw	r7,mmhashnext(r3)		/* Chain in the old head */

		stw	r4,mmPTEhash(r3)		/* Point to the head of the hash list */

		sync					/* Make sure the chain is updated */
		stw	r10,0(r8)			/* Unlock the hash list */
		mtmsr	r0				/* Restore translation and interruptions */
		isync					/* Toss anything done with DAT off */
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_add_map
#endif
		blr					/* Leave... */
/*
 * mp=hw_lock_phys_vir(space, va) - Finds and locks a physical entry by vaddr.
 *
 * Returns the mapping with the associated physent locked if found, or a
 * zero and no lock if not. If we timed out trying to get the lock on
 * the physical entry, we return a 1. A physical entry can never be on an
 * odd boundary, so we can distinguish between a mapping and a timeout code.
 *
 * Interrupts must be disabled before calling.
 *
 * Using the space and the virtual address, we hash into the hash table
 * and get a lock on the PTEG hash chain. Then we search the chain for the
 * mapping for our virtual address. From there, we extract the pointer to
 * the physical entry.
 *
 * Next comes a bit of monkey business. We need to get a lock on the physical
 * entry. But, according to our rules, we can't get it after we've gotten the
 * PTEG hash lock; we could deadlock if we do. So, we need to release the
 * hash lock. The problem is, though, that as soon as we release it, some
 * other yahoo may remove our mapping between the time that we release the
 * hash lock and obtain the phys entry lock. So, we can't count on the
 * mapping once we release the lock. Instead, after we lock the phys entry,
 * we search the mapping list (phys_link) for our translation. If we don't find it,
 * we unlock the phys entry, bail out, and return a 0 for the mapping address. If we
 * did find it, we keep the lock and return the address of the mapping block.
 *
 * What happens when a mapping is found, but there is no physical entry?
 * This is what happens when an I/O area is mapped. If one of these mappings
 * is found, the mapping is returned, as is usual for this call, but we don't
 * try to lock anything. There could possibly be some problems here if another
 * processor releases the mapping while we are still using it. Hope this
 * ain't gonna happen.
 *
 * Taaa-dahhh! Easy as pie, huh?
 *
 * So, we have a few hacks for running with translation off in here.
 * First, when we call the lock routine, we have carnal knowledge of the registers it uses.
 * That way, we don't need a stack frame, which we can't have 'cause the stack is in
 * virtual storage. But wait, as if that's not enough... We need one more register. So,
 * we cram the LR into the CTR and return from there.
 */
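/*
 * A minimal C sketch of the lock-ordering dance described above (hypothetical
 * helpers throughout). The invariant: never take the physent lock while
 * holding the PTEG lock, and never trust a mapping found under a lock you
 * have since dropped without re-finding it under the new lock.
 *
 *	struct mapping *hw_lock_phys_vir_sketch(uint32_t space, uint32_t va)
 *	{
 *		pca_lock(pca);
 *		struct mapping *mp = find_on_hash_chain(space, va);
 *		if (mp == NULL || mp->physent == NULL) {
 *			pca_unlock(pca);
 *			return mp;              // not found, or I/O: nothing to lock
 *		}
 *		struct phys_entry *pp = mp->physent;
 *		pca_unlock(pca);                // must drop before physent lock
 *		if (!lock_physent_with_timeout(pp))
 *			return (struct mapping *)1; // timeout code (odd, not a pointer)
 *		uint32_t id = pte_id_for(space, va);
 *		for (struct mapping *m = pp->phys_link; m; m = m->next)
 *			if (m->pte_v == id)
 *				return m;       // still there: return it locked
 *		unlock_physent(pp);             // it was removed in the window
 *		return NULL;
 *	}
 */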
		.globl	EXT(hw_lock_phys_vir)

LEXT(hw_lock_phys_vir)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_lock_phys_vir
#endif
		mfmsr	r12				/* Get the MSR */
		eqv	r6,r6,r6			/* Fill the bottom with foxes */
		mfsprg	r9,2				; Get feature flags
		rlwinm	r11,r3,6,6,25			/* Position the space for the VSID */
		mfspr	r5,sdr1				/* Get hash table base and size */
		rlwimi	r11,r4,30,2,5			/* Insert the segment no. to make a VSID */
		mtcrf	0x04,r9				; Set the features
		rlwimi	r6,r5,16,0,15			/* Make table size -1 out of mask */
		andi.	r0,r12,0x7FCF			/* Disable translation and interruptions */
		rlwinm	r9,r4,4,0,3			; Move nybble 1 up to 0
		rlwinm	r7,r4,26,10,25			/* Isolate the page index */
		or	r8,r5,r6			/* Point to the last byte in table */
		xor	r7,r7,r11			/* Get primary hash */
		rlwinm	r11,r11,1,1,24			/* Position VSID for pte ID */
		addi	r8,r8,1				/* Point to the PTEG Control Area */
		xor	r9,r9,r4			; Splooch vaddr nybble 0 and 1 together
		and	r7,r7,r6			/* Wrap the hash */
		rlwimi	r11,r4,10,26,31			/* Move API into pte ID */
		rlwinm	r9,r9,6,27,29			; Get splooched bits in place
		add	r8,r8,r7			/* Point to our PCA entry */
		rlwinm	r10,r3,2,27,29			; Get low 3 bits of the VSID for look-aside hash

		bt	pfNoMSRirb,hlpNoMSR		; No MSR...

		mtmsr	r0				; Translation and all off
		isync					; Toss prefetch
		b	hlpNoMSRx			; Skip the trap version...

hlpNoMSR:	mr	r3,r0				; Get the new MSR
		li	r0,loadMSR			; Get the MSR setter SC
		sc					; Set it

hlpNoMSRx:
		la	r3,PCAhash(r8)			/* Point to the mapping hash area */
		xor	r9,r9,r10			; Finish splooching nybble 0, 1, and the low bits of the VSID
		isync					/* Make sure translation is off before we ref storage */

/*
 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
 * and word 0 of the PTE (the virtual part).
 *
 * Now, we just lock the PCA and find our mapping, if it exists.
 */
		dcbt	0,r3				/* We'll need the hash area in a sec, so get it */
		add	r3,r3,r9			/* Point to the right mapping hash slot */

ptegLcka:	lwarx	r10,0,r8			/* Get the PTEG lock */
		li	r5,1				/* Get the locked value */
		mr.	r10,r10				/* Is it locked? */
		bne-	ptegLckwa			/* Yeah... */
		stwcx.	r5,0,r8				/* Try to take it */
		bne-	ptegLcka			/* Someone else was trying, try again... */
		b	ptegSXga			/* All done... */

ptegLckwa:	mr.	r10,r10				/* Check if it's already held */
		beq+	ptegLcka			/* It's clear... */
		lwz	r10,0(r8)			/* Get lock word again... */
		b	ptegLckwa			/* Wait... */
ptegSXga:	isync					/* Make sure we haven't used anything yet */

		mflr	r0				/* Get the LR */
		lwz	r9,0(r3)			/* Pick up the first mapping block */
		mtctr	r0				/* Stuff it into the CTR */

findmapa:	mr.	r3,r9				/* Did we hit the end? */
		bne+	chkmapa				/* Nope... */

		stw	r3,0(r8)			/* Unlock the PTEG lock.
						   	   Note: we never saved anything while we
						   	   had the lock, so we don't need a sync
						   	   before we unlock it */
vbail:		mtmsr	r12				/* Restore translation and interruptions */
		isync					/* Make sure translation is cool */
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_lock_phys_vir
#endif
		bctr					/* Return... */
chkmapa:	lwz	r10,mmPTEv(r3)			/* Pick up our virtual ID */
		lwz	r9,mmhashnext(r3)		/* Pick up next mapping block */
		cmplw	r10,r11				/* Have we found ourself? */
		bne-	findmapa			/* Nope, still wandering... */

		lwz	r9,mmphysent(r3)		/* Get our physical entry pointer */
		li	r5,0				/* Clear this out */
		mr.	r9,r9				/* Is there, like, a physical entry? */
		stw	r5,0(r8)			/* Unlock the PTEG lock.
						   	   Note: we never saved anything while we
						   	   had the lock, so we don't need a sync
						   	   before we unlock it */

		beq-	vbail				/* If there is no physical entry, it's time
						   	   to leave... */
/*
 * Here we want to call hw_lock_bit. We don't want to use the stack, 'cause it's
 * in virtual storage, and we're in real. So, we've carefully looked at the code
 * in hw_lock_bit (and unlock) and cleverly don't use any of the registers that it uses.
 * Be very, very aware of how you change this code. By the way, it uses:
 * R0, R6, R7, R8, and R9. R3, R4, and R5 contain parameters.
 * Unfortunately, we still need to stash R9. So... Since we know we will not be interrupted
 * ('cause we turned off interruptions and translation is off) we will use SPRG3...
 */
		lwz	r10,mmPTEhash(r3)		/* Save the head of the hash-alike chain. We need it to find ourselves later */
		lis	r5,HIGH_ADDR(EXT(LockTimeOut))	/* Get address of timeout value */
		la	r3,pephyslink(r9)		/* Point to the lock word */
		ori	r5,r5,LOW_ADDR(EXT(LockTimeOut)) /* Get second half of address */
		li	r4,PHYS_LOCK			/* Get the lock bit value */
		lwz	r5,0(r5)			/* Pick up the timeout value */
		mtsprg	3,r9				/* Save R9 in SPRG3 */

		bl	EXT(hw_lock_bit)		/* Go do the lock */

		mfsprg	r9,3				/* Restore pointer to the phys_entry */
		mr.	r3,r3				/* Did we timeout? */
		lwz	r4,pephyslink(r9)		/* Pick up first mapping block */
		beq-	penterr				/* Bad deal, we timed out... */
		rlwinm	r4,r4,0,0,26			; Clear out the flags from first link

findmapb:	mr.	r3,r4				/* Did we hit the end? */
		bne+	chkmapb				/* Nope... */

		la	r3,pephyslink(r9)		/* Point to where the lock is */
		li	r4,PHYS_LOCK			/* Get the lock bit value */
		bl	EXT(hw_unlock_bit)		/* Go unlock the physentry */

		li	r3,0				/* Say we failed */
		b	vbail				/* Return in abject failure... */

penterr:	li	r3,1				/* Set timeout */
		b	vbail				/* Return in abject failure... */
chkmapb:	lwz	r6,mmPTEv(r3)			/* Pick up our virtual ID */
		lwz	r4,mmnext(r3)			/* Pick up next mapping block */
		cmplw	r6,r11				/* Have we found ourself? */
		lwz	r5,mmPTEhash(r3)		/* Get the start of our hash chain */
		bne-	findmapb			/* Nope, still wandering... */
		cmplw	r5,r10				/* On the same hash chain? */
		bne-	findmapb			/* Nope, keep looking... */

		b	vbail				/* Return in glorious triumph... */
/*
 * hw_rem_map(mapping) - remove a mapping from the system.
 *
 * Upon entry, R3 contains a pointer to a mapping block and the associated
 * physical entry is locked if there is one.
 *
 * If the mapping entry indicates that there is a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, we remove the mapping from the phys_entry and the PTEG hash list.
 *
 * Unlock any locks that are left, and exit.
 *
 * Note that this must be done with both interruptions off and VM off.
 *
 * Note that this code depends upon the VSID being of the format 00SXXXXX
 * where S is the segment number.
 */
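/*
 * In outline, the removal below does the following (a C sketch under the
 * hypothetical names introduced earlier; the real work is the assembly
 * that follows):
 *
 *	void hw_rem_map_sketch(struct mapping *mp)
 *	{
 *		pca_lock(pca_for(mp));
 *		unlink_from_hash_chain(mp);         // choke if we are not on it
 *		if (mp->pte != NULL) {              // a PTE is in the hash table
 *			uint32_t rc = invalidate_pte_and_tlbie(mp->pte);
 *			if (mp->physent)
 *				merge_rc(mp->physent, rc);
 *			free_pteg_slot(mp->pte);
 *		}
 *		pca_unlock(pca_for(mp));            // drop early; see nadamrg
 *		if (mp->physent)
 *			unlink_from_phys_chain(mp); // physent locked by the caller
 *	}
 */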
		.globl	EXT(hw_rem_map)

LEXT(hw_rem_map)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_rem_map
#endif
		mfsprg	r9,2				; Get feature flags
		mfmsr	r0				/* Save the MSR */
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
		mtcrf	0x04,r9				; Set the features
		rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

		bt	pfNoMSRirb,lmvNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	lmvNoMSRx			; Skip the trap version...

lmvNoMSR:	mr	r6,r3				; Save the mapping pointer
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r6				; Restore the mapping pointer

lmvNoMSRx:
		lwz	r6,mmPTEhash(r3)		/* Get pointer to hash list anchor */
		lwz	r5,mmPTEv(r3)			/* Get the VSID */
		dcbt	0,r6				/* We'll need that chain in a bit */

		rlwinm	r7,r6,0,0,25			/* Round hash list down to PCA boundary */
		li	r12,1				/* Get the locked value */
		subi	r6,r6,mmhashnext		/* Make the anchor look like an entry */
ptegLck1:	lwarx	r10,0,r7			/* Get the PTEG lock */
		mr.	r10,r10				/* Is it locked? */
		bne-	ptegLckw1			/* Yeah... */
		stwcx.	r12,0,r7			/* Try to take it */
		bne-	ptegLck1			/* Someone else was trying, try again... */
		b	ptegSXg1			/* All done... */

ptegLckw1:	mr.	r10,r10				/* Check if it's already held */
		beq+	ptegLck1			/* It's clear... */
		lwz	r10,0(r7)			/* Get lock word again... */
		b	ptegLckw1			/* Wait... */

ptegSXg1:	isync					/* Make sure we haven't used anything yet */

		lwz	r12,mmhashnext(r3)		/* Prime with our forward pointer */
		lwz	r4,mmPTEent(r3)			/* Get the pointer to the PTE now that the lock's set */
srchmaps:	mr.	r10,r6				/* Save the previous entry */
		bne+	mapok				/* No error... */

		lis	r0,HIGH_ADDR(Choke)		/* We have a kernel choke!!! */
		ori	r0,r0,LOW_ADDR(Choke)
		sc					/* Firmware Heimlich maneuver */

mapok:		lwz	r6,mmhashnext(r6)		/* Look at the next one */
		cmplwi	cr5,r4,0			/* Is there a PTE? */
		cmplw	r6,r3				/* Have we found ourselves? */
		bne+	srchmaps			/* Nope, get your head together... */
		stw	r12,mmhashnext(r10)		/* Remove us from the queue */
		rlwinm	r9,r5,1,0,3			/* Move in the segment */
		rlwinm	r8,r4,6,4,19			/* Line PTEG disp up to a page */
		rlwinm	r11,r5,5,4,19			/* Line up the VSID */
		lwz	r10,mmphysent(r3)		/* Point to the physical entry */

		beq+	cr5,nopte			/* There's no PTE to invalidate... */

		xor	r8,r8,r11			/* Back hash to virt index */
		lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
		rlwimi	r9,r5,22,4,9			/* Move in the API */
		ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
		mfspr	r11,pvr				/* Find out what kind of machine we are */
		rlwimi	r9,r8,0,10,19			/* Create the virtual address */
		rlwinm	r11,r11,16,16,31		/* Isolate CPU type */

		stw	r5,0(r4)			/* Make the PTE invalid */

		cmplwi	cr1,r11,3			/* Is this a 603? */
		sync					/* Make sure the invalid is stored */
tlbhang1:	lwarx	r5,0,r12			/* Get the TLBIE lock */
		rlwinm	r11,r4,29,29,31			/* Get the bit position of entry */
		mr.	r5,r5				/* Is it locked? */
		lis	r6,0x8000			/* Start up a bit mask */
		li	r5,1				/* Get our lock word */
		bne-	tlbhang1			/* It's locked, go wait... */
		stwcx.	r5,0,r12			/* Try to get it */
		bne-	tlbhang1			/* We was beat... */

		srw	r6,r6,r11			/* Make a "free slot" mask */
		lwz	r5,PCAallo(r7)			/* Get the allocation control bits */
		rlwinm	r11,r6,24,8,15			/* Make the autogen bit to turn off */
		or	r5,r5,r6			/* Turn on the free bit */
		rlwimi	r11,r11,24,16,23		/* Get lock bit mask to turn it off */

		andc	r5,r5,r11			/* Turn off the lock and autogen bits in allocation flags */
		li	r11,0				/* Lock clear value */

		tlbie	r9				/* Invalidate it everywhere */

		beq-	cr1,its603a			/* It's a 603, skip the tlbsync... */

		eieio					/* Make sure that the tlbie happens first */
		tlbsync					/* Wait for everyone to catch up */

its603a:	sync					/* Make sure of it all */
		stw	r11,0(r12)			/* Clear the tlbie lock */
		eieio					/* Make sure those RC bits are loaded */
		stw	r5,PCAallo(r7)			/* Show that the slot is free */
		stw	r11,mmPTEent(r3)		/* Clear the pointer to the PTE */
nopte:		mr.	r10,r10				/* See if there is a physical entry */
		la	r9,pephyslink(r10)		/* Point to the physical mapping chain */
		beq-	nophys				/* No physical entry, we're done... */
		beq-	cr5,nadamrg			/* No PTE to merge... */

		lwz	r6,4(r4)			/* Get the latest reference and change bits */
		la	r12,pepte1(r10)			/* Point right at the master copy */
		rlwinm	r6,r6,0,23,24			/* Extract just the RC bits */

mrgrc:		lwarx	r8,0,r12			/* Get the master copy */
		or	r8,r8,r6			/* Merge in latest RC */
		stwcx.	r8,0,r12			/* Save it back */
		bne-	mrgrc				/* If it changed, try again... */

nadamrg:	li	r11,0				/* Clear this out */
		lwz	r12,mmnext(r3)			/* Prime with our next */
		stw	r11,0(r7)			/* Unlock the hash chain now so we don't
						   	   lock out another processor during
						   	   our next little search */
srchpmap:	mr.	r10,r9				/* Save the previous entry */
		bne+	mapok1				/* No error... */

		lis	r0,HIGH_ADDR(Choke)		/* We have a kernel choke!!! */
		ori	r0,r0,LOW_ADDR(Choke)
		sc					/* Firmware Heimlich maneuver */

mapok1:		lwz	r9,mmnext(r9)			/* Look at the next one */
		rlwinm	r8,r9,0,27,31			; Save the flags (including the lock)
		rlwinm	r9,r9,0,0,26			; Clear out the flags from first link
		cmplw	r9,r3				/* Have we found ourselves? */
		bne+	srchpmap			/* Nope, get your head together... */

		rlwimi	r12,r8,0,27,31			; Insert the lock and flags
		stw	r12,mmnext(r10)			/* Remove us from the queue */
		mtmsr	r0				/* Interrupts and translation back on */
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_rem_map
#endif
		blr					/* Return... */

nophys:		li	r4,0				/* Make sure this is 0 */
		sync					/* Make sure that chain is updated */
		stw	r4,0(r7)			/* Unlock the hash chain */
		mtmsr	r0				/* Interrupts and translation back on */
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_rem_map
#endif
		blr					/* Return... */
/*
 * hw_prot(physent, prot) - Change the protection of a physical page
 *
 * Upon entry, R3 contains a pointer to a physical entry which is locked.
 * R4 contains the PPC protection bits.
 *
 * The first thing we do is to slam the new protection into the phys entry.
 * Then we scan the mappings and process each one.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, slam the protection bits into the entry and unlock the hash list.
 *
 * Note that this must be done with both interruptions off and VM off.
 */
		.globl	EXT(hw_prot)

LEXT(hw_prot)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_prot
#endif

		mfsprg	r9,2				; Get feature flags
		mfmsr	r0				/* Save the MSR */
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
		li	r5,pepte1			/* Get displacement to the second word of master pte */
		mtcrf	0x04,r9				; Set the features
		rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

		bt	pfNoMSRirb,hpNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	hpNoMSRx			; Skip the trap version...

hpNoMSR:	mr	r10,r3				; Save the physent pointer
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r10				; Restore the physent pointer
		li	r5,pepte1			; Reload the pepte1 displacement

hpNoMSRx:
		lwz	r10,pephyslink(r3)		/* Get the first mapping block */
		rlwinm	r10,r10,0,0,26			; Clear out the flags from first link

/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */
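/*
 * The same interlocked-update pattern, as a C sketch (illustrative names;
 * the PP bits live in the low two bits of PTE word 1):
 *
 *	static void set_master_prot(struct phys_entry *pp, uint32_t prot)
 *	{
 *		uint32_t old = atomic_load(&pp->pte1);
 *		uint32_t new;
 *		do {                               // lwarx/stwcx. retry loop
 *			new = (old & ~3u) | (prot & 3u);
 *		} while (!atomic_compare_exchange_weak(&pp->pte1, &old, new));
 *	}
 */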
protcng:	lwarx	r8,r5,r3			/* Get the master copy */
		rlwimi	r8,r4,0,30,31			/* Move in the protection bits */
		stwcx.	r8,r5,r3			/* Save it back */
		bne-	protcng				/* If it changed, try again... */
protnext:	mr.	r10,r10				/* Are there any more mappings? */
		beq-	protdone			/* Naw... */

		lwz	r7,mmPTEhash(r10)		/* Get pointer to hash list anchor */
		lwz	r5,mmPTEv(r10)			/* Get the virtual address */
		rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */

		li	r12,1				/* Get the locked value */

protLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
		mr.	r11,r11				/* Is it locked? */
		bne-	protLckw1			/* Yeah... */
		stwcx.	r12,0,r7			/* Try to take it */
		bne-	protLck1			/* Someone else was trying, try again... */
		b	protSXg1			/* All done... */

protLckw1:	mr.	r11,r11				/* Check if it's already held */
		beq+	protLck1			/* It's clear... */
		lwz	r11,0(r7)			/* Get lock word again... */
		b	protLckw1			/* Wait... */

protSXg1:	isync					/* Make sure we haven't used anything yet */
		lwz	r6,mmPTEent(r10)		/* Get the pointer to the PTE now that the lock's set */

		rlwinm	r9,r5,1,0,3			/* Move in the segment */
		lwz	r2,mmPTEr(r10)			; Get the mapping copy of the PTE
		mr.	r6,r6				/* See if there is a PTE here */
		rlwinm	r8,r5,31,2,25			/* Line it up */
		rlwimi	r2,r4,0,30,31			; Move protection bits into the mapping copy

		beq+	protul				/* There's no PTE to invalidate... */

		xor	r8,r8,r6			/* Back hash to virt index */
		rlwimi	r9,r5,22,4,9			/* Move in the API */
		lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
		rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
		ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
		mfspr	r11,pvr				/* Find out what kind of machine we are */
		rlwimi	r9,r8,6,10,19			/* Create the virtual address */
		rlwinm	r11,r11,16,16,31		/* Isolate CPU type */

		stw	r5,0(r6)			/* Make the PTE invalid */
		cmplwi	cr1,r11,3			/* Is this a 603? */
		sync					/* Make sure the invalid is stored */
tlbhangp:	lwarx	r11,0,r12			/* Get the TLBIE lock */
		rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
		mr.	r11,r11				/* Is it locked? */
		lis	r5,0x8000			/* Start up a bit mask */
		li	r11,1				/* Get our lock word */
		bne-	tlbhangp			/* It's locked, go wait... */
		stwcx.	r11,0,r12			/* Try to get it */
		bne-	tlbhangp			/* We was beat... */

		li	r11,0				/* Lock clear value */

		tlbie	r9				/* Invalidate it everywhere */

		beq-	cr1,its603p			/* It's a 603, skip the tlbsync... */

		eieio					/* Make sure that the tlbie happens first */
		tlbsync					/* Wait for everyone to catch up */

its603p:	stw	r11,0(r12)			/* Clear the lock */
		srw	r5,r5,r8			/* Make a "free slot" mask */
		sync					/* Make sure of it all */

		lwz	r6,4(r6)			/* Get the latest reference and change bits */
		stw	r11,mmPTEent(r10)		/* Clear the pointer to the PTE */
		rlwinm	r6,r6,0,23,24			/* Extract the RC bits */
		lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
		rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
		rlwimi	r2,r6,0,23,24			; Put the latest RC bits in the mapping copy
		or	r9,r9,r5			/* Set the slot free */
		rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
		andc	r9,r9,r8			/* Clear the auto and lock bits */
		li	r5,pepte1			/* Get displacement to the second word of master pte */
		stw	r9,PCAallo(r7)			/* Store the allocation controls */

protmod:	lwarx	r11,r5,r3			/* Get the master copy */
		or	r11,r11,r6			/* Merge in latest RC */
		stwcx.	r11,r5,r3			/* Save it back */
		bne-	protmod				/* If it changed, try again... */

		sync					/* Make sure that chain is updated */

protul:		li	r4,0				/* Get a 0 */
		stw	r2,mmPTEr(r10)			; Save the updated mapping PTE
		lwz	r10,mmnext(r10)			/* Get the next */
		stw	r4,0(r7)			/* Unlock the hash chain */
		b	protnext			/* Go get the next one */
protdone:	mtmsr	r0				/* Interrupts and translation back on */
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_prot
#endif
		blr					/* Return... */
/*
 * hw_prot_virt(mapping, prot) - Change the protection of a single page
 *
 * Upon entry, R3 contains a pointer (real) to a mapping.
 * R4 contains the PPC protection bits.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, slam the protection bits into the entry, merge the RC bits,
 * and unlock the hash list.
 *
 * Note that this must be done with both interruptions off and VM off.
 */
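/*
 * The per-page variant in outline (C sketch, hypothetical helpers as before):
 *
 *	void hw_prot_virt_sketch(struct mapping *mp, uint32_t prot)
 *	{
 *		pca_lock(pca_for(mp));
 *		uint32_t image = mp->pte_r;        // mapping's copy of PTE word 1
 *		image = (image & ~3u) | (prot & 3u);
 *		if (mp->pte != NULL) {             // live PTE: pull RC, retire slot
 *			uint32_t rc = invalidate_pte_and_tlbie(mp->pte);
 *			image |= rc;               // keep RC in the mapping copy
 *			if (mp->physent)
 *				merge_rc(mp->physent, rc);
 *			free_pteg_slot(mp->pte);
 *		}
 *		mp->pte_r = image;                 // next fault rebuilds the PTE
 *		pca_unlock(pca_for(mp));
 *	}
 */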
		.globl	EXT(hw_prot_virt)

LEXT(hw_prot_virt)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_prot_virt
#endif

		mfsprg	r9,2				; Get feature flags
		mfmsr	r0				/* Save the MSR */
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
		mtcrf	0x04,r9				; Set the features
		rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

		bt	pfNoMSRirb,hpvNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	hpvNoMSRx			; Skip the trap version...

hpvNoMSR:	mr	r10,r3				; Save the mapping pointer
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r10				; Restore the mapping pointer

hpvNoMSRx:
/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

		lwz	r7,mmPTEhash(r3)		/* Get pointer to hash list anchor */
		lwz	r5,mmPTEv(r3)			/* Get the virtual address */
		rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */

		li	r12,1				/* Get the locked value */
protvLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
		mr.	r11,r11				/* Is it locked? */
		bne-	protvLckw1			/* Yeah... */
		stwcx.	r12,0,r7			/* Try to take it */
		bne-	protvLck1			/* Someone else was trying, try again... */
		b	protvSXg1			/* All done... */

protvLckw1:	mr.	r11,r11				/* Check if it's already held */
		beq+	protvLck1			/* It's clear... */
		lwz	r11,0(r7)			/* Get lock word again... */
		b	protvLckw1			/* Wait... */

protvSXg1:	isync					/* Make sure we haven't used anything yet */
		lwz	r6,mmPTEent(r3)			/* Get the pointer to the PTE now that the lock's set */
		lwz	r2,mmPTEr(r3)			; Get the mapping copy of the real part

		rlwinm	r9,r5,1,0,3			/* Move in the segment */
		cmplwi	cr7,r6,0			; Any PTE to invalidate?
		rlwimi	r2,r4,0,30,31			; Move in the new protection bits
		rlwinm	r8,r5,31,2,25			/* Line it up */

		beq+	cr7,pvnophys			/* There's no PTE to invalidate... */

		xor	r8,r8,r6			/* Back hash to virt index */
		rlwimi	r9,r5,22,4,9			/* Move in the API */
		lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
		rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
		ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
		mfspr	r11,pvr				/* Find out what kind of machine we are */
		rlwimi	r9,r8,6,10,19			/* Create the virtual address */
		rlwinm	r11,r11,16,16,31		/* Isolate CPU type */

		stw	r5,0(r6)			/* Make the PTE invalid */
		cmplwi	cr1,r11,3			/* Is this a 603? */
		sync					/* Make sure the invalid is stored */
tlbhangpv:	lwarx	r11,0,r12			/* Get the TLBIE lock */
		rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
		mr.	r11,r11				/* Is it locked? */
		lis	r5,0x8000			/* Start up a bit mask */
		li	r11,1				/* Get our lock word */
		bne-	tlbhangpv			/* It's locked, go wait... */
		stwcx.	r11,0,r12			/* Try to get it */
		bne-	tlbhangpv			/* We was beat... */

		li	r11,0				/* Lock clear value */

		tlbie	r9				/* Invalidate it everywhere */

		beq-	cr1,its603pv			/* It's a 603, skip the tlbsync... */

		eieio					/* Make sure that the tlbie happens first */
		tlbsync					/* Wait for everyone to catch up */

its603pv:	stw	r11,0(r12)			/* Clear the lock */
		srw	r5,r5,r8			/* Make a "free slot" mask */
		sync					/* Make sure of it all */

		lwz	r6,4(r6)			/* Get the latest reference and change bits */
		stw	r11,mmPTEent(r3)		/* Clear the pointer to the PTE */
		rlwinm	r6,r6,0,23,24			/* Extract the RC bits */
		lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
		rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
		lwz	r10,mmphysent(r3)		; Get any physical entry
		or	r9,r9,r5			/* Set the slot free */
		rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
		andc	r9,r9,r8			/* Clear the auto and lock bits */
		mr.	r10,r10				; Is there a physical entry?
		li	r5,pepte1			/* Get displacement to the second word of master pte */
		stw	r9,PCAallo(r7)			/* Store the allocation controls */
		rlwimi	r2,r6,0,23,24			; Stick in RC bits
		beq-	pvnophys			; No physical entry...

		lwarx	r11,r5,r10			; Touch the master copy first (prime the cache)

protvmod:	lwarx	r11,r5,r10			/* Get the master copy */
		or	r11,r11,r6			/* Merge in latest RC */
		stwcx.	r11,r5,r10			/* Save it back */
		bne-	protvmod			/* If it changed, try again... */

		sync					/* Make sure that chain is updated */
pvnophys:	li	r4,0				/* Get a 0 */
		stw	r2,mmPTEr(r3)			; Set the real part of the PTE
		stw	r4,0(r7)			/* Unlock the hash chain */
		mtmsr	r0				; Restore interrupts and translation
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_prot_virt
#endif
		blr					; Return...
/*
 * hw_attr_virt(mapping, attr) - Change the attributes of a single page
 *
 * Upon entry, R3 contains a pointer (real) to a mapping.
 * R4 contains the WIMG bits.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, slam the WIMG bits into the entry, merge the RC bits,
 * and unlock the hash list.
 *
 * Note that this must be done with both interruptions off and VM off.
 */
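/*
 * The WIMG field occupies bits 25-28 of PTE word 1 (mask 0x78), just above
 * the PP bits. A C sketch of the update applied to the mapping's copy
 * (illustrative, matching the rlwimi below):
 *
 *	static uint32_t set_wimg(uint32_t pte_lo, uint32_t wimg)
 *	{
 *		return (pte_lo & ~0x78u) | (wimg & 0x78u);
 *	}
 *
 * After the image changes, the page is flushed (dcbst) or invalidated
 * (dcbi/icbi) a cache line at a time, since changing cacheability can
 * leave stale lines behind.
 */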
		.globl	EXT(hw_attr_virt)

LEXT(hw_attr_virt)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_attr_virt
#endif

		mfsprg	r9,2				; Get feature flags
		mfmsr	r0				/* Save the MSR */
		mtcrf	0x04,r9				; Set the features
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
		rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

		bt	pfNoMSRirb,havNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	havNoMSRx			; Skip the trap version...

havNoMSR:	mr	r10,r3				; Save the mapping pointer
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r10				; Restore the mapping pointer

havNoMSRx:
/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

		lwz	r7,mmPTEhash(r3)		/* Get pointer to hash list anchor */
		lwz	r5,mmPTEv(r3)			/* Get the virtual address */
		rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */

		li	r12,1				/* Get the locked value */
attrvLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
		mr.	r11,r11				/* Is it locked? */
		bne-	attrvLckw1			/* Yeah... */
		stwcx.	r12,0,r7			/* Try to take it */
		bne-	attrvLck1			/* Someone else was trying, try again... */
		b	attrvSXg1			/* All done... */

attrvLckw1:	mr.	r11,r11				/* Check if it's already held */
		beq+	attrvLck1			/* It's clear... */
		lwz	r11,0(r7)			/* Get lock word again... */
		b	attrvLckw1			/* Wait... */
attrvSXg1:	isync					/* Make sure we haven't used anything yet */

		lwz	r6,mmPTEent(r3)			/* Get the pointer to the PTE now that the lock's set */
		lwz	r2,mmPTEr(r3)			; Get the mapping copy of the real part

		rlwinm	r9,r5,1,0,3			/* Move in the segment */
		mr.	r6,r6				/* See if there is a PTE here */
		rlwimi	r2,r4,0,25,28			; Move in the new attribute bits
		rlwinm	r8,r5,31,2,25			/* Line it up and check if empty */

		beq+	avnophys			/* There's no PTE to invalidate... */

		xor	r8,r8,r6			/* Back hash to virt index */
		rlwimi	r9,r5,22,4,9			/* Move in the API */
		lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
		rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
		ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
		mfspr	r11,pvr				/* Find out what kind of machine we are */
		rlwimi	r9,r8,6,10,19			/* Create the virtual address */
		rlwinm	r11,r11,16,16,31		/* Isolate CPU type */
		stw	r5,0(r6)			/* Make the PTE invalid */
		cmplwi	cr1,r11,3			/* Is this a 603? */
		sync					/* Make sure the invalid is stored */
tlbhangav:	lwarx	r11,0,r12			/* Get the TLBIE lock */
		rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
		mr.	r11,r11				/* Is it locked? */
		lis	r5,0x8000			/* Start up a bit mask */
		li	r11,1				/* Get our lock word */
		bne-	tlbhangav			/* It's locked, go wait... */
		stwcx.	r11,0,r12			/* Try to get it */
		bne-	tlbhangav			/* We was beat... */

		li	r11,0				/* Lock clear value */

		tlbie	r9				/* Invalidate it everywhere */

		beq-	cr1,its603av			/* It's a 603, skip the tlbsync... */

		eieio					/* Make sure that the tlbie happens first */
		tlbsync					/* Wait for everyone to catch up */

its603av:	stw	r11,0(r12)			/* Clear the lock */
		srw	r5,r5,r8			/* Make a "free slot" mask */
		sync					/* Make sure of it all */

		lwz	r6,4(r6)			/* Get the latest reference and change bits */
		stw	r11,mmPTEent(r3)		/* Clear the pointer to the PTE */
		rlwinm	r6,r6,0,23,24			/* Extract the RC bits */
		lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
		rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
		lwz	r10,mmphysent(r3)		; Get any physical entry
		or	r9,r9,r5			/* Set the slot free */
		rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
		andc	r9,r9,r8			/* Clear the auto and lock bits */
		mr.	r10,r10				; Is there a physical entry?
		li	r5,pepte1			/* Get displacement to the second word of master pte */
		stw	r9,PCAallo(r7)			/* Store the allocation controls */
		rlwimi	r2,r6,0,23,24			; Stick in RC bits
		beq-	avnophys			; No physical entry...

		lwarx	r11,r5,r10			; Touch the master copy first (prime the cache)

attrvmod:	lwarx	r11,r5,r10			/* Get the master copy */
		or	r11,r11,r6			/* Merge in latest RC */
		stwcx.	r11,r5,r10			/* Save it back */
		bne-	attrvmod			/* If it changed, try again... */

		sync					/* Make sure that chain is updated */
avnophys:	li	r4,0				/* Get a 0 */
		stw	r2,mmPTEr(r3)			; Set the real part of the PTE
		stw	r4,0(r7)			/* Unlock the hash chain */

		rlwinm	r2,r2,0,0,19			; Clear back to page boundary

attrflsh:	cmplwi	r4,(4096-32)			; Are we about to do the last line on page?
		dcbst	r2,r4				; Flush cache because we changed attributes
		addi	r4,r4,32			; Bump up cache line offset
		blt+	attrflsh			; Do the whole page...

		sync					; Make sure the flushes are done
		li	r4,0				; Start over at the beginning

attrimvl:	cmplwi	r4,(4096-32)			; Are we about to do the last line on page?
		dcbi	r2,r4				; Invalidate dcache because we changed attributes
		icbi	r2,r4				; Invalidate icache because we changed attributes
		addi	r4,r4,32			; Bump up cache line offset
		blt+	attrimvl			; Do the whole page...

		mtmsr	r0				; Restore interrupts and translation
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_attr_virt
#endif
		blr					; Return...
/*
 * hw_pte_comm(physent) - Do something to the PTE pointing to a physical page
 *
 * Upon entry, R3 contains a pointer to a physical entry which is locked.
 * Note that this must be done with both interruptions off and VM off.
 *
 * First, we set up CRs 5 and 7 to indicate which of the 7 calls this is.
 *
 * Now we scan the mappings to invalidate any with an active PTE.
 *
 * Acquire the lock on the PTEG hash list for the mapping being processed.
 *
 * If the current mapping has a PTE entry, we invalidate
 * it and merge the reference and change information into the phys_entry.
 *
 * Next, unlock the hash list and go on to the next mapping.
 */
		.globl	EXT(hw_inv_all)

LEXT(hw_inv_all)

		li	r9,0x800			/* Indicate invalidate all */
		li	r2,0				; No inadvertent modifications, please
		b	hw_pte_comm			/* Join in the fun... */

		.globl	EXT(hw_tst_mod)

LEXT(hw_tst_mod)

		lwz	r8,pepte1(r3)			; Get the saved PTE image
		li	r9,0x400			/* Indicate test modify */
		li	r2,0				; No inadvertent modifications, please
		rlwinm.	r8,r8,25,31,31			; Make change bit into return code
		beq+	hw_pte_comm			; Assume we do not know if it is set...
		mr	r3,r8				; Set the return code
		blr					; Return quickly...

		.globl	EXT(hw_tst_ref)

LEXT(hw_tst_ref)

		lwz	r8,pepte1(r3)			; Get the saved PTE image
		li	r9,0x200			/* Indicate test reference bit */
		li	r2,0				; No inadvertent modifications, please
		rlwinm.	r8,r8,24,31,31			; Make reference bit into return code
		beq+	hw_pte_comm			; Assume we do not know if it is set...
		mr	r3,r8				; Set the return code
		blr					; Return quickly...
/*
 * Note that the following are all in one CR for ease of use later
 */

		.globl	EXT(hw_set_mod)

LEXT(hw_set_mod)

		li	r9,0x008			/* Indicate set modify bit */
		li	r2,0x4				; Set C, clear none
		b	hw_pte_comm			/* Join in the fun... */

		.globl	EXT(hw_clr_mod)

LEXT(hw_clr_mod)

		li	r9,0x004			/* Indicate clear modify bit */
		li	r2,0x1				; Set none, clear C
		b	hw_pte_comm			/* Join in the fun... */

		.globl	EXT(hw_set_ref)

LEXT(hw_set_ref)

		li	r9,0x002			/* Indicate set reference */
		li	r2,0x8				; Set R, clear none
		b	hw_pte_comm			/* Join in the fun... */

		.globl	EXT(hw_clr_ref)

LEXT(hw_clr_ref)

		li	r9,0x001			/* Indicate clear reference bit */
		li	r2,0x2				; Set none, clear R
		b	hw_pte_comm			/* Join in the fun... */
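/*
 * The R2 encoding above packs the set mask and clear mask into one nybble:
 * bit 0x8 = set R, 0x4 = set C, 0x2 = clear R, 0x1 = clear C. The common
 * code below (commulnl/commcng) rotates these into the real PTE positions
 * (R = 0x100, C = 0x80 in PTE word 1). A C sketch of the decode:
 *
 *	static uint32_t apply_rc_cmd(uint32_t pte_lo, uint32_t cmd)
 *	{
 *		uint32_t set = ((cmd & 0x8) ? 0x100u : 0) | ((cmd & 0x4) ? 0x80u : 0);
 *		uint32_t clr = ((cmd & 0x2) ? 0x100u : 0) | ((cmd & 0x1) ? 0x80u : 0);
 *		return (pte_lo | set) & ~clr;   // set first, then clear
 *	}
 */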
/*
 * This is the common stuff.
 */

hw_pte_comm:						/* Common routine for pte tests and manips */

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_pte_comm
#endif
		mfsprg	r8,2				; Get feature flags
		lwz	r10,pephyslink(r3)		/* Get the first mapping block */
		mfmsr	r0				/* Save the MSR */
		rlwinm.	r10,r10,0,0,26			; Clear out the flags from first link and see if we are mapped
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
		mtcrf	0x04,r8				; Set the features
		rlwinm	r12,r12,0,28,25			/* Clear IR and DR */
		beq-	comnmap				; No mapping
		dcbt	br0,r10				; Touch the first mapping in before the isync

comnmap:	bt	pfNoMSRirb,hpcNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	hpcNoMSRx			; Skip the trap version...

hpcNoMSR:	mr	r5,r3				; Save the physent pointer
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r5				; Restore the physent pointer

hpcNoMSRx:
		mtcrf	0x05,r9				/* Set the call type flags into cr5 and 7 */

		beq-	commdone			; Nothing is mapped to this page...
		b	commnext			; Jump to first pass (jump here so we can align loop)
commnext:	lwz	r11,mmnext(r10)			; Get the pointer to the next mapping (if any)
		lwz	r7,mmPTEhash(r10)		/* Get pointer to hash list anchor */
		lwz	r5,mmPTEv(r10)			/* Get the virtual address */
		mr.	r11,r11				; More mappings to go?
		rlwinm	r7,r7,0,0,25			/* Round hash list down to PCA boundary */
		beq-	commnxtch			; No more mappings...
		dcbt	br0,r11				; Touch the next mapping

commnxtch:	li	r12,1				/* Get the locked value */

commLck1:	lwarx	r11,0,r7			/* Get the PTEG lock */
		mr.	r11,r11				/* Is it locked? */
		bne-	commLckw1			/* Yeah... */
		stwcx.	r12,0,r7			/* Try to take it */
		bne-	commLck1			/* Someone else was trying, try again... */
		b	commSXg1			/* All done... */

commLckw1:	mr.	r11,r11				/* Check if it's already held */
		beq+	commLck1			/* It's clear... */
		lwz	r11,0(r7)			/* Get lock word again... */
		b	commLckw1			/* Wait... */
commSXg1:	isync					/* Make sure we haven't used anything yet */

		lwz	r6,mmPTEent(r10)		/* Get the pointer to the PTE now that the lock's set */

		rlwinm	r9,r5,1,0,3			/* Move in the segment */
		mr.	r6,r6				/* See if there is a PTE entry here */
		rlwinm	r8,r5,31,2,25			/* Line it up and check if empty */

		beq+	commul				/* There's no PTE to invalidate... */

		xor	r8,r8,r6			/* Back hash to virt index */
		rlwimi	r9,r5,22,4,9			/* Move in the API */
		lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
		rlwinm	r5,r5,0,1,31			/* Clear the valid bit */
		ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
		rlwimi	r9,r8,6,10,19			/* Create the virtual address */

		stw	r5,0(r6)			/* Make the PTE invalid */
		mfspr	r4,pvr				/* Find out what kind of machine we are */
		sync					/* Make sure the invalid is stored */
tlbhangco:	lwarx	r11,0,r12			/* Get the TLBIE lock */
		rlwinm	r8,r6,29,29,31			/* Get the bit position of entry */
		mr.	r11,r11				/* Is it locked? */
		lis	r5,0x8000			/* Start up a bit mask */
		li	r11,1				/* Get our lock word */
		bne-	tlbhangco			/* It's locked, go wait... */
		stwcx.	r11,0,r12			/* Try to get it */
		bne-	tlbhangco			/* We was beat... */

		rlwinm	r4,r4,16,16,31			/* Isolate CPU type */
		li	r11,0				/* Lock clear value */
		cmplwi	r4,3				/* Is this a 603? */

		tlbie	r9				/* Invalidate it everywhere */

		beq-	its603co			/* It's a 603, skip the tlbsync... */

		eieio					/* Make sure that the tlbie happens first */
		tlbsync					/* Wait for everyone to catch up */

its603co:	stw	r11,0(r12)			/* Clear the lock */
		srw	r5,r5,r8			/* Make a "free slot" mask */
		sync					/* Make sure of it all */

		lwz	r6,4(r6)			/* Get the latest reference and change bits */
		lwz	r9,PCAallo(r7)			/* Get the allocation control bits */
		stw	r11,mmPTEent(r10)		/* Clear the pointer to the PTE */
		rlwinm	r8,r5,24,8,15			/* Make the autogen bit to turn off */
		or	r9,r9,r5			/* Set the slot free */
		rlwimi	r8,r8,24,16,23			/* Get lock bit mask to turn it off */
		rlwinm	r4,r6,0,23,24			/* Extract the RC bits */
		andc	r9,r9,r8			/* Clear the auto and lock bits */
		li	r5,pepte1			/* Get displacement to the second word of master pte */
		stw	r9,PCAallo(r7)			/* Store the allocation controls */
commmod:	lwarx	r11,r5,r3			/* Get the master copy */
		or	r11,r11,r4			/* Merge in latest RC */
		stwcx.	r11,r5,r3			/* Save it back */
		bne-	commmod				/* If it changed, try again... */

		sync					/* Make sure that chain is updated */
		b	commulnl			; Skip loading the old real part...

commul:		lwz	r6,mmPTEr(r10)			; Get the real part

commulnl:	rlwinm	r12,r2,5,23,24			; Get the "set" bits
		rlwinm	r11,r2,7,23,24			; Get the "clear" bits

		or	r6,r6,r12			; Set the bits to come on
		andc	r6,r6,r11			; Clear those to come off

		stw	r6,mmPTEr(r10)			; Set the new RC

		lwz	r10,mmnext(r10)			/* Get the next */
		li	r4,0				/* Make sure this is 0 */
		mr.	r10,r10				; Is there another mapping?
		stw	r4,0(r7)			/* Unlock the hash chain */
		bne+	commnext			; Go get the next if there is one...
/*
 * Now that all PTEs have been invalidated and the master RC bits are updated,
 * we go ahead and figure out what the original call was and do that. Note that
 * another processor could be messing around and may have entered one of the
 * PTEs we just removed into the hash table. Too bad... You takes yer chances.
 * If there's a problem with that, it's because some higher level was trying to
 * do something with a mapping that it shouldn't. So, the problem's really
 * there, nyaaa, nyaaa, nyaaa... nyaaa, nyaaa... nyaaa! So there!
 */

commdone:	li	r5,pepte1			/* Get displacement to the second word of master pte */
		blt	cr5,commfini			/* We're finished, it was invalidate all... */
		bgt	cr5,commtst			/* It was a test modified... */
		beq	cr5,commtst			/* It was a test reference... */
/*
 * Note that we need to do the interlocked update here because another processor
 * can be updating the reference and change bits even though the physical entry
 * is locked. All modifications to the PTE portion of the physical entry must be
 * done via interlocked update.
 */

		rlwinm	r12,r2,5,23,24			; Get the "set" bits
		rlwinm	r11,r2,7,23,24			; Get the "clear" bits

commcng:	lwarx	r8,r5,r3			/* Get the master copy */
		or	r8,r8,r12			; Set the bits to come on
		andc	r8,r8,r11			; Clear those to come off
		stwcx.	r8,r5,r3			/* Save it back */
		bne-	commcng				/* If it changed, try again... */

		mtmsr	r0				/* Interrupts and translation back on */
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_pte_comm
#endif
		blr					; Return...

commtst:	lwz	r8,pepte1(r3)			/* Get the PTE */
		bne-	cr5,commtcb			; This is for the change bit...
		mtmsr	r0				; Interrupts and translation back on
		rlwinm	r3,r8,24,31,31			; Copy reference bit to bit 31
		isync					; Toss prefetching
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_pte_comm
#endif
		blr					; Return...

commtcb:	rlwinm	r3,r8,25,31,31			; Copy change bit to bit 31

commfini:	mtmsr	r0				; Interrupts and translation back on
		isync					; Toss prefetching
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_pte_comm
#endif
		blr					; Return...
/*
 * unsigned int hw_test_rc(mapping *mp, boolean_t reset);
 *
 * Test the RC bits for a specific mapping. If reset is non-zero, clear them.
 * We return the RC value in the mapping if there is no PTE or if C is set.
 * (Note: R is always set with C.) Otherwise we invalidate the PTE and
 * collect the RC bits from there, also merging them into the global copy.
 *
 * For now, we release the PTE slot and leave it invalid. In the future, we
 * may consider re-validating and not releasing the slot. It would be faster,
 * but our current implementation says that we will have no PTEs valid
 * without the reference bit set.
 *
 * We will special case C==1 && not reset to just return the RC.
 *
 * Probable state is worst performance state: C bit is off and there is a PTE.
 */
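/*
 * The decision tree, as a C sketch (hypothetical helpers as before; the RC
 * pair is returned in the low two bits, R above C, matching the rlwinm
 * positioning below):
 *
 *	unsigned int hw_test_rc_sketch(struct mapping *mp, int reset)
 *	{
 *		if ((mp->pte_r & 0x80) && !reset)
 *			return (mp->pte_r >> 7) & 3;  // C already set: fast path
 *		pca_lock(pca_for(mp));
 *		if (mp->pte != NULL) {                // worst case: live PTE
 *			uint32_t hw = invalidate_pte_and_tlbie(mp->pte);
 *			mp->pte_r |= hw;              // fold hardware RC into copy
 *			if (mp->physent)
 *				merge_rc(mp->physent, hw);
 *			free_pteg_slot(mp->pte);
 *		}
 *		unsigned int rc = (mp->pte_r >> 7) & 3;
 *		if (reset)
 *			mp->pte_r &= ~0x180u;         // clear R and C if requested
 *		pca_unlock(pca_for(mp));
 *		return rc;
 *	}
 */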
		.globl	EXT(hw_test_rc)

LEXT(hw_test_rc)

		mfsprg	r9,2				; Get feature flags
		mfmsr	r0				; Save the MSR
		mr.	r4,r4				; See if we have a reset to do later
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Clear interruption mask
		crnot	htrReset,cr0_eq			; Remember reset
		mtcrf	0x04,r9				; Set the features
		rlwinm	r12,r12,0,28,25			; Clear IR and DR

		bt	pfNoMSRirb,htrNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	htrNoMSRx			; Skip the trap version...

htrNoMSR:	mr	r2,r3				; Save the mapping pointer
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it
		mr	r3,r2				; Restore the mapping pointer

htrNoMSRx:
		lwz	r2,mmPTEr(r3)			; Get the real part
		lwz	r7,mmPTEhash(r3)		; Get pointer to hash list anchor
		rlwinm.	r12,r2,0,24,24			; Is the change bit on?
		lwz	r5,mmPTEv(r3)			; Get the virtual address
		crnor	cr0_eq,cr0_eq,htrReset		; Set if C=1 && not reset
		rlwinm	r7,r7,0,0,25			; Round hash list down to PCA boundary
		bt	cr0_eq,htrcset			; Special case changed but no reset case...

		li	r12,1				; Get the locked value

htrLck1:	lwarx	r11,0,r7			; Get the PTEG lock
		mr.	r11,r11				; Is it locked?
		bne-	htrLckw1			; Yeah...
		stwcx.	r12,0,r7			; Try to take it
		bne-	htrLck1				; Someone else was trying, try again...
		b	htrSXg1				; All done...

htrLckw1:	mr.	r11,r11				; Check if it is already held
		beq+	htrLck1				; It is clear...
		lwz	r11,0(r7)			; Get lock word again...
		b	htrLckw1			; Wait...
htrSXg1:	isync					; Make sure we have not used anything yet

		lwz	r6,mmPTEent(r3)			; Get the pointer to the PTE now that the lock is set
		lwz	r2,mmPTEr(r3)			; Get the mapping copy of the real part

		rlwinm	r9,r5,1,0,3			; Move in the segment
		mr.	r6,r6				; Any PTE to invalidate?
		rlwinm	r8,r5,31,2,25			; Line it up

		beq+	htrnopte			; There is no PTE to invalidate...

		xor	r8,r8,r6			; Back hash to virt index
		rlwimi	r9,r5,22,4,9			; Move in the API
		lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	; Get the TLBIE lock
		rlwinm	r5,r5,0,1,31			; Clear the valid bit
		ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	; Grab up the bottom part
		mfspr	r11,pvr				; Find out what kind of machine we are
		rlwimi	r9,r8,6,10,19			; Create the virtual address
		rlwinm	r11,r11,16,16,31		; Isolate CPU type

		stw	r5,0(r6)			; Make the PTE invalid
		cmplwi	cr1,r11,3			; Is this a 603?
		sync					; Make sure the invalid is stored
htrtlbhang:	lwarx	r11,0,r12			; Get the TLBIE lock
		rlwinm	r8,r6,29,29,31			; Get the bit position of entry
		mr.	r11,r11				; Is it locked?
		lis	r5,0x8000			; Start up a bit mask
		li	r11,1				; Get our lock word
		bne-	htrtlbhang			; It is locked, go wait...
		stwcx.	r11,0,r12			; Try to get it
		bne-	htrtlbhang			; We was beat...

		li	r11,0				; Lock clear value

		tlbie	r9				; Invalidate it everywhere

		beq-	cr1,htr603			; It is a 603, skip the tlbsync...

		eieio					; Make sure that the tlbie happens first
		tlbsync					; Wait for everyone to catch up

htr603:		stw	r11,0(r12)			; Clear the lock
		srw	r5,r5,r8			; Make a "free slot" mask
		sync					; Make sure of it all

		lwz	r6,4(r6)			; Get the latest reference and change bits
		stw	r11,mmPTEent(r3)		; Clear the pointer to the PTE
		rlwinm	r6,r6,0,23,24			; Extract the RC bits
		lwz	r9,PCAallo(r7)			; Get the allocation control bits
		rlwinm	r8,r5,24,8,15			; Make the autogen bit to turn off
		lwz	r10,mmphysent(r3)		; Get any physical entry
		or	r9,r9,r5			; Set the slot free
		rlwimi	r8,r8,24,16,23			; Get lock bit mask to turn it off
		andc	r9,r9,r8			; Clear the auto and lock bits
		mr.	r10,r10				; Is there a physical entry?
		li	r5,pepte1			; Get displacement to the second word of master pte
		stw	r9,PCAallo(r7)			; Store the allocation controls
		rlwimi	r2,r6,0,23,24			; Stick in RC bits
		beq-	htrnopte			; No physical entry...
htrmrc:		lwarx	r11,r5,r10			; Get the master copy
		or	r11,r11,r6			; Merge in latest RC
		stwcx.	r11,r5,r10			; Save it back
		bne-	htrmrc				; If it changed, try again...

		sync					; Make sure that chain update is stored

htrnopte:	rlwinm	r5,r2,25,30,31			; Position RC and mask off
		bf	htrReset,htrnorst		; No reset to do...
		rlwinm	r2,r2,0,25,22			; Clear the RC if requested

htrnorst:	li	r4,0				; Get a 0
		stw	r2,mmPTEr(r3)			; Set the real part of the PTE
		stw	r4,0(r7)			; Unlock the hash chain

		mr	r3,r5				; Get the old RC for the return
		mtmsr	r0				; Restore interrupts and translation
		isync					; Toss prefetching
		blr					; Return...

htrcset:	rlwinm	r3,r2,25,30,31			; Position RC and mask off
		mtmsr	r0				; Restore interrupts and translation
		isync					; Toss prefetching
		blr					; Return...
/*
 * hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - Sets the default physical page attributes
 *
 * Note that this must be done with both interruptions off and VM off.
 * Move the passed in attributes into the pte image in the phys entry.
 */
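/*
 * The attribute word packed below is just WIMG plus PP in their PTE word 1
 * positions (a C sketch; masks match the andi./rlwimi that follow):
 *
 *	static uint32_t pack_attr(uint32_t prot, uint32_t wimg)
 *	{
 *		return (wimg & 0x78u) | (prot & 3u); // WIMG bits 0x78, PP bits 0x3
 *	}
 */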
		.globl	EXT(hw_phys_attr)

LEXT(hw_phys_attr)

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of hw_phys_attr
#endif

		mfsprg	r9,2				; Get feature flags
		mfmsr	r0				/* Save the MSR */
		andi.	r5,r5,0x0078			/* Clean up the WIMG */
		rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
		mtcrf	0x04,r9				; Set the features
		rlwimi	r5,r4,0,30,31			/* Move the protection into the wimg register */
		la	r6,pepte1(r3)			/* Point to the default pte */
		rlwinm	r12,r12,0,28,25			/* Clear IR and DR */

		bt	pfNoMSRirb,hpaNoMSR		; No MSR...

		mtmsr	r12				; Translation and all off
		isync					; Toss prefetch
		b	hpaNoMSRx			; Skip the trap version...

hpaNoMSR:	li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r12				; Get new MSR
		sc					; Set it

hpaNoMSRx:
atmattr:	lwarx	r10,0,r6			/* Get the pte */
		rlwimi	r10,r5,0,25,31			/* Move in the new attributes */
		stwcx.	r10,0,r6			/* Try it on for size */
		bne-	atmattr				/* Someone else was trying, try again... */

		mtmsr	r0				/* Interrupts and translation back on */
		isync
#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; End of hw_phys_attr
#endif
		blr					/* All done... */
/*
 * handlePF - handle a page fault interruption
 *
 * If the fault can be handled, this routine will RFI directly,
 * otherwise it will return with all registers as in entry.
 *
 * Upon entry, state and all registers have been saved in savearea.
 * This is pointed to by R13.
 * IR and DR are off, interrupts are masked,
 * and floating point is disabled.
 * R3 is the interrupt code.
 *
 * If we bail, we must restore cr5, and all registers except 6 and
 */
		.globl	EXT(handlePF)

LEXT(handlePF)
/*
 * This first part does a quick check to see if we can handle the fault.
 * We can't handle any kind of protection exceptions here, so we pass
 * them up to the next level.
 *
 * The mapping lists are kept in MRS (most recently stolen)
 * order on queues anchored within the
 * PTEG to which the virtual address hashes. This is further segregated by
 * the low-order 3 bits of the VSID XORed with the segment number and XORed
 * with bits 4-7 of the vaddr in an attempt to keep the searches
 *
 * MRS is handled by moving the entry to the head of its list when stolen in the
 * assumption that it will be revalidated soon. Entries are created on the head
 * of the list because they will be used again almost immediately.
 *
 * We need R13 set to the savearea, R3 set to the interrupt code, and R2
 * set to the per_proc.
 *
 * NOTE: In order for a page-fault redrive to work, the translation miss
 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
 * before we come here.
 */
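/*
 * The overall flow below, as a C sketch (hypothetical helpers; the real code
 * never takes a stack frame, and on success it RFIs directly):
 *
 *	int handlePF_sketch(int intcode, savearea *sv)
 *	{
 *		uint32_t va = fault_address(intcode, sv);    // DAR or SRR0
 *		if (is_protection_fault(intcode, sv))
 *			return PASS_UP;                // next level handles it
 *		uint32_t vsid = vsid_for(sv, va);      // kernel or user segment
 *		pca_t *pca = pca_for(vsid, va);        // hash from SDR1, as before
 *		pca_lock(pca);
 *		struct mapping *mp = search_subhash(pca, vsid, va);
 *		if (mp == NULL)
 *			mp = autogen_from_block_maps(vsid, va);  // "tryAuto" below
 *		if (mp == NULL) {
 *			pca_unlock(pca);
 *			return PASS_UP;                // not ours, bail
 *		}
 *		steal_slot_and_build_pte(mp);          // sets R bit, MRS reorder
 *		pca_unlock(pca);
 *		return RFI;                            // restart the instruction
 *	}
 */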
		cmplwi	r3,T_INSTRUCTION_ACCESS		/* See if this is for the instruction */
		lwz	r8,savesrr1(r13)		; Get the MSR to determine mode
		beq-	gotIfetch			; We have an IFETCH here...

		lwz	r7,savedsisr(r13)		/* Get the DSISR */
		lwz	r6,savedar(r13)			/* Get the fault address */
		b	ckIfProt			; Go check if this is a protection fault...

gotIfetch:	mr	r7,r8				; IFETCH info is in SRR1
		lwz	r6,savesrr0(r13)		/* Get the instruction address */

ckIfProt:	rlwinm.	r7,r7,0,1,1			; Is this a protection exception?
		beqlr-					; Yes... (probably not though)
/*
 * We will need to restore registers if we bail after this point.
 * Note that at this point several SRs have been changed to the kernel versions.
 * Therefore, for these we must build these values.
 */

#if PERFTIMES && DEBUG
		bl	EXT(dbgLog2)			; Start of handlePF
#endif
		lwz	r3,PP_USERPMAP(r2)		; Get the user pmap (not needed if kernel access, but optimize for user??)
		rlwinm.	r8,r8,0,MSR_PR_BIT,MSR_PR_BIT	; Supervisor state access?
		rlwinm	r5,r6,6,26,29			; Get index to the segment slot
		eqv	r1,r1,r1			; Fill the bottom with foxes
		bne+	notsuper			; Go do the user mode interrupt stuff...

		cmplwi	cr1,r5,SR_COPYIN_NUM*4		; See if this is the copyin/copyout segment
		rlwinm	r3,r6,24,8,11			; Make the kernel VSID
		bne+	cr1,havevsid			; We are done if we do not want the copyin/out guy...

		mfsr	r3,SR_COPYIN			; Get the copy vsid
		b	havevsid			; Join up...

notsuper:	addi	r5,r5,PMAP_SEGS			; Get offset to table
		lwzx	r3,r3,r5			; Get the VSID
havevsid:	mfspr	r5,sdr1				/* Get hash table base and size */
			cror	cr1_eq,cr0_eq,cr0_eq		; Remember if kernel fault for later
			rlwinm	r9,r6,2,2,5			; Move nybble 1 up to 0 (keep aligned with VSID)
			rlwimi	r1,r5,16,0,15			/* Make table size -1 out of mask */
			rlwinm	r3,r3,6,2,25			/* Position the space for the VSID */
			rlwinm	r7,r6,26,10,25			/* Isolate the page index */
			xor	r9,r9,r3			; Splooch vaddr nybble 0 (from VSID) and 1 together
			or	r8,r5,r1			/* Point to the last byte in table */
			xor	r7,r7,r3			/* Get primary hash */
			rlwinm	r3,r3,1,1,24			/* Position VSID for pte ID */
			addi	r8,r8,1				/* Point to the PTEG Control Area */
			rlwinm	r9,r9,8,27,29			; Get splooched bits in place
			and	r7,r7,r1			/* Wrap the hash */
			rlwimi	r3,r6,10,26,31			/* Move API into pte ID */
			add	r8,r8,r7			/* Point to our PCA entry */
			rlwinm	r12,r3,27,27,29			; Get low 3 bits of the VSID for look-aside hash
			la	r11,PCAhash(r8)			/* Point to the mapping hash area */
			xor	r9,r9,r12			; Finish splooching nybble 0, 1, and the low bits of the VSID
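/*
 *			Editor's sketch (illustrative, not part of the build): the primary
 *			hash formed above is the standard 32-bit PowerPC PTEG hash.  In C,
 *			with hypothetical names:
 *
 *			unsigned int pteg_hash(unsigned int vsid, unsigned int ea) {
 *				unsigned int pgindex = (ea >> 12) & 0xFFFF;	// EA bits 4-19
 *				return (vsid & 0x7FFFF) ^ pgindex;		// primary hash
 *			}
 *
 *			unsigned int pteg_offset(unsigned int hash, unsigned int mask) {
 *				return (hash & mask) << 6;	// wrap to table, 64 bytes per PTEG
 *			}
 *
 *			where mask is roughly (hash table size / 64) - 1, derived from SDR1
 *			much as R1 is built above.
 */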
/*
 *			We have about as much as we need to start searching the autogen (aka block maps)
 *			and mappings. From here on, any kind of failure will bail, and
 *			contention will either bail or restart from here.
 */

			li	r12,1				/* Get the locked value */
			dcbt	0,r11				/* We'll need the hash area in a sec, so get it */
			add	r11,r11,r9			/* Point to the right mapping hash slot */

ptegLck:	lwarx	r10,0,r8			/* Get the PTEG lock */
			mr.	r10,r10				/* Is it locked? */
			bne-	ptegLckw			/* Yeah... */
			stwcx.	r12,0,r8			/* Take it */
			bne-	ptegLck				/* Someone else was trying, try again... */
			b	ptegSXg				/* All done... */
ptegLckw:	mr.	r10,r10				/* Check if it's already held */
			beq+	ptegLck				/* It's clear... */
			lwz	r10,0(r8)			/* Get lock word again... */
			b	ptegLckw			/* Wait... */
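/*
 *			Editor's sketch of the lock protocol above (illustrative only; the
 *			GCC builtins stand in for lwarx/stwcx. and isync):
 *
 *			void pca_lock(volatile unsigned int *lock) {
 *				for (;;) {
 *					if (*lock == 0 &&
 *					    __sync_lock_test_and_set(lock, 1) == 0)
 *						break;			// stwcx. succeeded
 *					while (*lock != 0)
 *						continue;		// spin on plain loads (ptegLckw)
 *				}
 *				__sync_synchronize();			// the isync at ptegSXg
 *			}
 *
 *			Spinning on plain loads while the lock is held keeps reservation
 *			traffic off the bus; the lwarx/stwcx. pair is retried only when
 *			the lock looks free.
 */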
			nop					; Force ISYNC to last instruction in IFETCH

ptegSXg:	isync					/* Make sure we haven't used anything yet */

			lwz	r9,0(r11)			/* Pick up first mapping block */
			mr	r5,r11				/* Get the address of the anchor */
			mr	r7,r9				/* Save the first in line */
			b	findmap				; Take space and force loop to cache line
findmap:	mr.	r12,r9				/* Are there more? */
			beq-	tryAuto				/* Nope, nothing in mapping list for us... */

			lwz	r10,mmPTEv(r12)			/* Get unique PTE identification */
			lwz	r9,mmhashnext(r12)		/* Get the chain, just in case */
			cmplw	r10,r3				/* Did we hit our PTE? */
			lwz	r0,mmPTEent(r12)		/* Get the pointer to the hash table entry */
			mr	r5,r12				/* Save the current as previous */
			bne-	findmap				; Nothing here, try the next...

;			Cache line boundary here

			cmplwi	cr1,r0,0			/* Is there actually a PTE entry in the hash? */
			lwz	r2,mmphysent(r12)		/* Get the physical entry */
			bne-	cr1,MustBeOK			/* There's an entry in the hash table, so this must
							   have been taken care of already... */
			lis	r4,0x8000			; Tell PTE inserter that this was not an auto
			cmplwi	cr2,r2,0			/* Is there a physical entry? */
			li	r0,0x0100			/* Force on the reference bit whenever we make a PTE valid */
			bne+	cr2,gotphys			/* Skip down if we have a physical entry */
			li	r0,0x0180			/* When there is no physical entry, force on
							   both R and C bits to keep hardware from
							   updating the PTE to set them. We don't
							   keep track of RC for I/O areas, so this is ok */

gotphys:	lwz	r2,mmPTEr(r12)			; Get the second part of the PTE
			b	insert				/* Go insert into the PTEG... */
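/*
 *			Editor's sketch of the findmap loop just above (struct and field
 *			names are hypothetical, mirroring the mm* offsets):
 *
 *			struct mapping *findmap(struct mapping *head, unsigned int ptev) {
 *				struct mapping *mp;
 *				for (mp = head; mp != 0; mp = mp->mmhashnext)
 *					if (mp->mmPTEv == ptev)
 *						return mp;	// hit: go build and insert the PTE
 *				return 0;			// miss: fall through to tryAuto
 *			}
 */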
MustBeOK:	li	r10,0				/* Get lock clear value */
			li	r3,T_IN_VAIN			/* Say that we handled it */
			stw	r10,PCAlock(r8)			/* Clear the PTEG lock */

#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Log the early-out
#endif
			blr					/* Blow back and handle exception */
/*
 *			We couldn't find it in the mapping list. As a last try, we will
 *			see if we can autogen it from the block mapped list.
 *
 *			A block mapped area is defined as a contiguous virtual area that is mapped to
 *			a contiguous physical area. The olde-tyme IBM VM/XA Interpretive Execution
 *			architecture referred to this as a V=F, or Virtual = Fixed, area.
 *
 *			We consider a V=F area to be a single entity; adjacent areas cannot be merged
 *			or overlapped. The protection and memory attributes are the same, and reference
 *			and change indications are not kept. The areas are not considered part of the
 *			physical RAM of the machine and do not have any associated physical table
 *			entries. Their primary use is intended for mapped I/O areas (e.g., framebuffers),
 *			although certain areas of RAM, such as the kernel V=R memory, can be mapped.
 *
 *			We also have a problem in the case of copyin/out: that access is done
 *			within the kernel for a user address. Unfortunately, the user isn't
 *			necessarily the current guy. That means that we don't have access to the
 *			right autogen list. We can't support this kind of access. So, we need to do
 *			a quick check here and cause a fault if there is an attempt to copyin or out
 *			to any autogenned area.
 *
 *			The lists must be kept short.
 *
 *			NOTE: kernel_pmap_store must be in V=R storage!!!!!!!!!!!!!!
 */
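/*
 *			Editor's sketch of what a block map entry appears to carry, judging
 *			from the bm* offsets used below (names and layout are assumptions,
 *			not the real declaration):
 *
 *			struct blokmap {
 *				struct blokmap	*bmnext;	// next block on the chain
 *				unsigned int	bmstart;	// first virtual address covered
 *				unsigned int	bmend;		// last virtual address covered
 *				unsigned int	bmPTEr;		// real (second) half of the PTE image
 *				unsigned int	bmspace;	// VSID for the area
 *				unsigned int	blkFlags;	// e.g., the blkPerm permanent bit
 *			};
 */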
tryAuto:	rlwinm.	r11,r3,0,5,24			; Check if this is a kernel VSID
			lis	r10,HIGH_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS)	; Get the top part of kernel block map anchor
			crandc	cr0_eq,cr1_eq,cr0_eq		; Set if kernel access and non-zero VSID (copyin or copyout)
			mfsprg	r11,0				; Get the per_proc area
			beq-	cr0,realFault			; Cannot autogen for copyin/copyout...
			ori	r10,r10,LOW_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS)	; Get the bottom part
			beq-	cr1,bmInKernel			; We are in kernel... (cr1 set way back at entry)

			lwz	r10,PP_USERPMAP(r11)		; Get the user pmap
			la	r10,PMAP_BMAPS(r10)		; Point to the chain anchor
			b	bmInKernel			; Jump over alignment gap...

#ifndef CHIP_ERRATA_MAX_V1
#endif /* CHIP_ERRATA_MAX_V1 */
bmapLck:	lwarx	r9,0,r10			; Get the block map anchor and lock
			rlwinm.	r5,r9,0,31,31			; Is it locked?
			ori	r5,r5,1				; Set the lock
			bne-	bmapLckw			; Yeah...
			stwcx.	r5,0,r10			; Lock the bmap list
			bne-	bmapLck				; Someone else was trying, try again...
			b	bmapSXg				; All done...

bmapLckw:	rlwinm.	r5,r9,0,31,31			; Check if it is still held
			beq+	bmapLck				; Not no more...
			lwz	r9,0(r10)			; Get lock word again...
			b	bmapLckw			; Check it out...

			nop					; Force ISYNC to last instruction in IFETCH

bmapSXg:	rlwinm.	r4,r9,0,0,26			; Clear out flags and lock
			isync					; Make sure we have not used anything yet
			bne+	findAuto			; We have something, let us go...

bmapNone:	stw	r9,0(r10)			; Unlock it, we have nothing here
							; No sync here because we have not changed anything
/*
 *			When we come here, we know that we can't handle this. Restore whatever
 *			state we trashed and go back to continue handling the interrupt.
 */

realFault:	li	r10,0				/* Get lock clear value */
			lwz	r3,saveexception(r13)		/* Figure out the exception code again */
			stw	r10,PCAlock(r8)			/* Clear the PTEG lock */
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Log the bail-out
#endif
			blr					/* Blow back and handle exception */
findAuto:	mr.	r4,r4				; Is there more?
			beq-	bmapNone			; No more...
			lwz	r5,bmstart(r4)			; Get the bottom of range
			lwz	r11,bmend(r4)			; Get the top of range
			cmplw	cr0,r6,r5			; Are we before the entry?
			cmplw	cr1,r6,r11			; Are we after the entry?
			cror	cr1_eq,cr0_lt,cr1_gt		; Set cr1_eq if not in range
			bne+	cr1,faGot			; Found it...

			lwz	r4,bmnext(r4)			; Get the next one
			b	findAuto			; Check it out...

faGot:		rlwinm	r6,r6,0,0,19			; Round to page
			lwz	r2,bmPTEr(r4)			; Get the real part of the PTE
			sub	r5,r6,r5			; Get offset into area
			stw	r9,0(r10)			; Unlock it, we are done with it (no sync needed)
			add	r2,r2,r5			; Adjust the real address

			lis	r4,0x8080			/* Indicate that this was autogened */
			li	r0,0x0180			/* Autogenned areas always set RC bits.
							   This keeps the hardware from having
							   to do two storage writes */
/*
 *			Here is where we insert the PTE into the hash. The PTE image is in R3, R2.
 *			The PTEG allocation controls are a bit map of the state of the PTEG. The
 *			PCAlock bits are a temporary lock for the specified PTE. PCAfree indicates that
 *			the PTE slot is empty. PCAauto means that it comes from an autogen area. These
 *			guys do not keep track of reference and change and are actually "wired".
 *			They're easy to maintain. PCAsteal
 *			is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
 *			fields fit in a single word and are loaded and stored under control of the
 *			PTEG control area lock (PCAlock).
 *
 *			Note that PCAauto does not contribute to the steal calculations at all. Originally
 *			it did; autogens were second in priority. This can result in a pathological
 *			case where an instruction can not make forward progress, or one PTE slot
 *
 *			Physically, the fields are arranged:
 */
insert:		lwz	r10,PCAallo(r8)			/* Get the PTEG controls */
			eqv	r6,r6,r6			/* Get all ones */
			mr	r11,r10				/* Make a copy */
			rlwimi	r6,r10,8,16,23			/* Insert sliding steal position */
			rlwimi	r11,r11,24,24,31		/* Duplicate the locked field */
			addi	r6,r6,-256			/* Form mask */
			rlwimi	r11,r11,16,0,15			/* This gives us a quadrupled lock mask */
			rlwinm	r5,r10,31,24,0			/* Slide over the mask for next time */
			mr	r9,r10				/* Make a copy to test */
			not	r11,r11				/* Invert the quadrupled lock */
			or	r2,r2,r0			/* Force on R, and maybe C bit */
			and	r9,r9,r11			/* Remove the locked guys */
			rlwimi	r5,r5,8,24,24			/* Wrap bottom bit to top in mask */
			rlwimi	r9,r11,0,16,31			/* Put two copies of the unlocked entries at the end */
			rlwinm	r6,r6,0,16,7			; Remove the autogens from the priority calculations
			rlwimi	r10,r5,0,24,31			/* Move steal map back in */
			and	r9,r9,r6			/* Set the starting point for stealing */
/*			So, now we have in R9:
			byte 0 = ~locked & free
			byte 2 = ~locked & (PCAsteal - 1)

			Each bit position represents (modulo 8) a PTE. If it is 1, it is available for
			allocation at its priority level, left to right.

			Additionally, the PCA steal field in R10 has been rotated right one bit.
*/
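/*
 *			Editor's sketch of the slot choice that follows: cntlzw picks the
 *			leftmost 1 bit of R9, so free slots beat steal candidates, and the
 *			rotated PCAsteal mask varies which victim is tried first:
 *
 *			int pick_slot(unsigned int cand) {		// cand == R9
 *				if (cand == 0)
 *					return -1;			// no slots: realFault
 *				int n = __builtin_clz(cand);		// cntlzw
 *				// n >> 3 is the priority level, n & 7 the PTE slot number
 *				return n & 7;
 *			}
 */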
			rlwinm	r21,r10,8,0,7			; Isolate just the old autogen bits
			cntlzw	r6,r9				/* Allocate a slot */
			mr	r14,r12				/* Save our mapping for later */
			cmplwi	r6,32				; Was there anything available?
			rlwinm	r7,r6,29,30,31			/* Get the priority slot we got this from */
			rlwinm	r6,r6,0,29,31			; Isolate bit position
			srw	r11,r4,r6			/* Position the PTEG control bits */
			slw	r21,r21,r6			; Move corresponding old autogen flag to bit 0
			mr	r22,r11				; Get another copy of the selected slot

			beq-	realFault			/* Arghh, no slots! Take the long way 'round... */

/*			Remember, we've already set up the mask pattern
			depending upon how we got here:
			if we got here from a simple mapping, R4=0x80000000;
			if we got here from autogen, it is 0x80800000. */

			rlwinm	r6,r6,3,26,28			/* Start calculating actual PTE address */
			rlwimi	r22,r22,24,8,15			; Duplicate selected slot in second byte
			rlwinm.	r11,r11,0,8,15			/* Isolate just the auto bit (remember about it too) */
			andc	r10,r10,r22			/* Turn off the free and auto bits */
			add	r6,r8,r6			/* Get position into PTEG control area */
			cmplwi	cr1,r7,1			/* Set the condition based upon the old PTE type */
			sub	r6,r6,r1			/* Switch it to the hash table */
			or	r10,r10,r11			/* Turn auto on if it is (PTEG control all set up now) */
			subi	r6,r6,1				/* Point right */
			stw	r10,PCAallo(r8)			/* Allocate our slot */
			dcbt	br0,r6				; Touch in the PTE
			bne	wasauto				/* This was autogenned... */

			stw	r6,mmPTEent(r14)		/* Link the mapping to the PTE slot */
/*
 *			So, now we're here and what exactly do we have? We've got:
 *			1) a full PTE entry, both top and bottom words in R3 and R2
 *			2) an allocated slot in the PTEG.
 *			3) R8 still points to the PTEG Control Area (PCA)
 *			4) R6 points to the PTE entry.
 *			5) R1 contains the length of the hash table - 1. We use this to back-translate
 *			   a PTE to a virtual address so we can invalidate TLBs.
 *			6) R11 has a copy of the PCA controls we set.
 *			7a) R7 indicates what the PTE slot was before we got to it: 0 shows
 *			   that it was empty; 2 or 3 show that we've stolen a live one.
 *			   CR1 is set to LT for empty and GT for stolen.
 *			7b) Bit 0 of R21 is 1 if the stolen PTE was autogenned
 *			8) As for our selected PTE, it should be valid if it was stolen
 *			   and invalid if not. We could put some kind of assert here to
 *			   check, but I think that I'd rather leave it in as a mysterious,
 *			   non-reproducible bug.
 *			9) The new PTE's mapping has been moved to the front of its PTEG hash list
 *			   so that it's kept in some semblance of an MRU list.
 *			10) R14 points to the mapping we're adding.
 *
 *			So, what do we have to do yet?
 *			1) If we stole a slot, we need to invalidate the PTE completely.
 *			2) If we stole one AND it was not an autogen,
 *			   copy the entire old PTE (including R and C bits) to its mapping.
 *			3) Set the new PTE in the PTEG and make sure it is valid.
 *			4) Unlock the PTEG control area.
 *			5) Go back to the interrupt handler, changing the interrupt
 *			   code to "in vain" which will restore the registers and bail out.
 */
wasauto:	oris	r3,r3,0x8000			/* Turn on the valid bit */
			blt+	cr1,slamit			/* It was empty, go slam it on in... */

			lwz	r10,0(r6)			/* Grab the top part of the PTE */
			rlwinm	r12,r6,6,4,19			/* Match up the hash to a page boundary */
			rlwinm	r5,r10,5,4,19			/* Extract the VSID to a page boundary */
			rlwinm	r10,r10,0,1,31			/* Make it invalid */
			xor	r12,r5,r12			/* Calculate vaddr */
			stw	r10,0(r6)			/* Invalidate the PTE */
			rlwinm	r5,r10,7,27,29			; Move nybble 0 up to subhash position
			rlwimi	r12,r10,1,0,3			/* Move in the segment portion */
			lis	r9,HIGH_ADDR(EXT(tlb_system_lock))	/* Get the TLBIE lock */
			xor	r5,r5,r10			; Splooch nybble 0 and 1
			rlwimi	r12,r10,22,4,9			/* Move in the API */
			ori	r9,r9,LOW_ADDR(EXT(tlb_system_lock))	/* Grab up the bottom part */
			rlwinm	r4,r10,27,27,29			; Get low 3 bits of the VSID for look-aside hash

			sync					/* Make sure the invalid is stored */

			xor	r4,r4,r5			; Finish splooching nybble 0, 1, and the low bits of the VSID
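/*
 *			Editor's note on the back-translation above: because
 *			hash = vsid ^ pgindex, the low hash bits (implied by the PTE's
 *			position in the table) give back the low page-index bits, and the
 *			API field supplies the rest.  Roughly, with the 32-bit PTE layout
 *			(V|VSID|H|API in the top word) and illustrative shifts:
 *
 *			unsigned int rebuild_va(unsigned int pte_hi, unsigned int hash) {
 *				unsigned int vsid    = (pte_hi >> 7) & 0xFFFFFF;	// VSID field
 *				unsigned int api     = pte_hi & 0x3F;			// EA bits 4-9
 *				unsigned int pgindex = (hash ^ vsid) & 0x3FF;		// EA bits 10-19
 *				return ((vsid & 0xF) << 28) | (api << 22) | (pgindex << 12);
 *			}
 *
 *			This works because the low bits of the VSID carry the segment
 *			number in this scheme; treat the exact field positions as a sketch.
 */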
tlbhang:	lwarx	r5,0,r9				/* Get the TLBIE lock */
			rlwinm	r4,r4,0,27,29			; Clean up splooched hash value
			mr.	r5,r5				/* Is it locked? */
			add	r4,r4,r8			/* Point to the offset into the PCA area */
			li	r5,1				/* Get our lock word */
			bne-	tlbhang				/* It's locked, go wait... */

			la	r4,PCAhash(r4)			/* Point to the start of the hash chain for the PTE we're replacing */

			stwcx.	r5,0,r9				/* Try to get it */
			bne-	tlbhang				/* We was beat... */

			mfspr	r7,pvr				/* Find out what kind of machine we are */
			li	r5,0				/* Lock clear value */
			rlwinm	r7,r7,16,16,31			/* Isolate CPU type */

			tlbie	r12				/* Invalidate it everywhere */

			cmplwi	r7,3				/* Is this a 603? */
			stw	r5,0(r9)			/* Clear the lock */

			beq-	its603				/* It's a 603, skip the tlbsync... */

			eieio					/* Make sure that the tlbie happens first */
			tlbsync					/* Wait for everyone to catch up */

its603:		rlwinm.	r21,r21,0,0,0			; See if we just stole an autogenned entry
			sync					/* Make sure of it all */

			bne	slamit				; The old was an autogen, time to slam the new in...
			lwz	r9,4(r6)			/* Get the real portion of old PTE */
			lwz	r7,0(r4)			/* Get the first element. We can't get here
							   if we aren't working with a mapping... */
			mr	r0,r7				; Save pointer to first element

findold:	mr	r1,r11				; Save the previous guy
			mr.	r11,r7				/* Copy and test the chain */
			beq-	bebad				/* Hit the end of the chain; should never happen... */

			lwz	r5,mmPTEv(r11)			/* See if this is the old active one */
			cmplw	cr2,r11,r14			/* Check if this is actually the new one */
			cmplw	r5,r10				/* Is this us? (Note: valid bit kept off in mappings) */
			lwz	r7,mmhashnext(r11)		/* Get the next one in line */
			beq-	cr2,findold			/* Don't count the new one... */
			cmplw	cr2,r11,r0			; Check if we are first on the list
			bne+	findold				/* Not it (and assume the worst)... */

			lwz	r12,mmphysent(r11)		/* Get the pointer to the physical entry */
			beq-	cr2,nomove			; We are first, no need to requeue...

			stw	r11,0(r4)			; Chain us to the head
			stw	r0,mmhashnext(r11)		; Chain the old head to us
			stw	r7,mmhashnext(r1)		; Unlink us
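/*
 *			Editor's sketch of the requeue just done: the stolen PTE's mapping
 *			moves to the head of its hash chain (MRS order), on the assumption
 *			that it will be revalidated soon:
 *
 *			// prev->mmhashnext == mp on entry; head is the anchor slot
 *			if (mp != *head) {
 *				prev->mmhashnext = mp->mmhashnext;	// unlink us
 *				mp->mmhashnext   = *head;		// old head follows us
 *				*head            = mp;			// we are first now
 *			}
 */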
nomove:		li	r5,0				/* Clear this on out */

			mr.	r12,r12				/* Is there a physical entry? */
			stw	r5,mmPTEent(r11)		; Clear the PTE entry pointer
			li	r5,pepte1			/* Point to the PTE last half */
			stw	r9,mmPTEr(r11)			; Squirrel away the whole thing (RC bits are in here)

			beq-	mrgmrcx				; No physical entry for this one...

			rlwinm	r11,r9,0,23,24			/* Keep only the RC bits */

mrgmrcx:	lwarx	r9,r5,r12			/* Get the master copy */
			or	r9,r9,r11			/* Merge in latest RC */
			stwcx.	r9,r5,r12			/* Save it back */
			bne-	mrgmrcx				/* If it changed, try again... */
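/*
 *			Editor's sketch of the merge above: the master R and C bits live in
 *			the physent (pepte1) and must be merged atomically, which is all
 *			the lwarx/stwcx. loop does.  With a GCC builtin standing in:
 *
 *			void merge_rc(volatile unsigned int *pepte1, unsigned int rc) {
 *				__sync_fetch_and_or(pepte1, rc & 0x180);	// R and C only
 *			}
 */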
/*
 *			Here's where we finish up. We store the real part of the PTE, then eieio,
 *			to make sure it's out there before the top half (with the valid bit set).
 */

slamit:		stw	r2,4(r6)			/* Stash the real part */
			li	r4,0				/* Get a lock clear value */
			eieio					/* Erect a barricade */
			stw	r3,0(r6)			/* Stash the virtual part and set valid on */

			stw	r4,PCAlock(r8)			/* Clear the PCA lock */

			li	r3,T_IN_VAIN			/* Say that we handled it */
			sync					/* Go no further until the stores complete */
#if PERFTIMES && DEBUG
			bl	EXT(dbgLog2)			; Log the completion
#endif
			blr					/* Back to the fold... */
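/*
 *			Editor's sketch of the publication order used by slamit: the real
 *			half is stored first, a barrier keeps it ahead of the top half,
 *			and only the final store makes the PTE visible to table walks:
 *
 *			pte[1] = pte_lo;			// real half first
 *			__sync_synchronize();			// the eieio above
 *			pte[0] = pte_hi | 0x80000000;		// set V last; PTE goes live
 */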
bebad:		lis	r0,HIGH_ADDR(Choke)		/* We have a kernel choke!!! */
			ori	r0,r0,LOW_ADDR(Choke)
			sc					/* Firmware Heimlich maneuver */

/*
 *			This walks the hash table or DBATs to locate the physical address of a virtual one.
 *			The space is provided. If it is the kernel space, the DBATs are searched first. Failing
 *			that, the hash table is accessed. Zero is returned for failure, so it must be special cased.
 *			This is usually used for debugging, so we try not to rely
 *			on anything that we don't have to.
 */
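/*
 *			Editor's sketch of one DBAT probe, as done four times below (field
 *			extraction per the usual BATU layout: BEPI, BL, Vs; the exact masks
 *			are illustrative):
 *
 *			int bat_xlate(unsigned int batu, unsigned int batl,
 *			              unsigned int va, unsigned int *pa) {
 *				unsigned int bl   = (batu >> 2) & 0x7FF;	// block length
 *				unsigned int mask = ~((bl << 17) | 0x1FFFF);	// bits that must match
 *				if (!(batu & 2))
 *					return 0;		// Vs clear: no supervisor mapping
 *				if ((va & mask) != (batu & 0xFFFE0000 & mask))
 *					return 0;		// outside the block
 *				*pa = (batl & 0xFFFE0000) | (va & ~mask);
 *				return 1;
 *			}
 */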
ENTRY(LRA, TAG_NO_FRAME_USED)

			mfsprg	r8,2				; Get feature flags
			mfmsr	r10				/* Save the current MSR */
			mtcrf	0x04,r8				; Set the features
			xoris	r5,r3,HIGH_ADDR(PPC_SID_KERNEL)	/* Clear the top half if equal */
			andi.	r9,r10,0x7FCF			/* Turn off interrupts and translation */
			eqv	r12,r12,r12			/* Fill the bottom with foxes */

			bt	pfNoMSRirb,lraNoMSR		; No MSR...

			mtmsr	r9				; Translation and all off
			isync					; Toss prefetch

			li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r9				; Get new MSR
			cmplwi	r5,LOW_ADDR(PPC_SID_KERNEL)	/* See if this is kernel space */
			rlwinm	r11,r3,6,6,25			/* Position the space for the VSID */
			isync					/* Purge pipe */
			bne-	notkernsp			/* This is not for the kernel... */

			mfspr	r5,dbat0u			/* Get the virtual address and length */
			eqv	r8,r8,r8			/* Get all foxes */
			rlwinm.	r0,r5,0,30,30			/* Check if valid for supervisor state */
			rlwinm	r7,r5,0,0,14			/* Clean up the base virtual address */
			beq-	ckbat1				/* Not valid, skip this one... */
			sub	r7,r4,r7			/* Subtract out the base */
			rlwimi	r8,r5,15,0,14			/* Get area length - 1 */
			mfspr	r6,dbat0l			/* Get the real part */
			cmplw	r7,r8				/* Check if it is in the range */
			bng+	fndbat				/* Yup, she's a good un... */

ckbat1:		mfspr	r5,dbat1u			/* Get the virtual address and length */
			eqv	r8,r8,r8			/* Get all foxes */
			rlwinm.	r0,r5,0,30,30			/* Check if valid for supervisor state */
			rlwinm	r7,r5,0,0,14			/* Clean up the base virtual address */
			beq-	ckbat2				/* Not valid, skip this one... */
			sub	r7,r4,r7			/* Subtract out the base */
			rlwimi	r8,r5,15,0,14			/* Get area length - 1 */
			mfspr	r6,dbat1l			/* Get the real part */
			cmplw	r7,r8				/* Check if it is in the range */
			bng+	fndbat				/* Yup, she's a good un... */

ckbat2:		mfspr	r5,dbat2u			/* Get the virtual address and length */
			eqv	r8,r8,r8			/* Get all foxes */
			rlwinm.	r0,r5,0,30,30			/* Check if valid for supervisor state */
			rlwinm	r7,r5,0,0,14			/* Clean up the base virtual address */
			beq-	ckbat3				/* Not valid, skip this one... */
			sub	r7,r4,r7			/* Subtract out the base */
			rlwimi	r8,r5,15,0,14			/* Get area length - 1 */
			mfspr	r6,dbat2l			/* Get the real part */
			cmplw	r7,r8				/* Check if it is in the range */
			bng-	fndbat				/* Yup, she's a good un... */

ckbat3:		mfspr	r5,dbat3u			/* Get the virtual address and length */
			eqv	r8,r8,r8			/* Get all foxes */
			rlwinm.	r0,r5,0,30,30			/* Check if valid for supervisor state */
			rlwinm	r7,r5,0,0,14			/* Clean up the base virtual address */
			beq-	notkernsp			/* Not valid, skip this one... */
			sub	r7,r4,r7			/* Subtract out the base */
			rlwimi	r8,r5,15,0,14			/* Get area length - 1 */
			mfspr	r6,dbat3l			/* Get the real part */
			cmplw	r7,r8				/* Check if it is in the range */
			bgt+	notkernsp			/* No good... */

fndbat:		rlwinm	r6,r6,0,0,14			/* Clean up the real address */
			mtmsr	r10				/* Restore state */
			add	r3,r7,r6			/* Relocate the offset to real */
			isync					/* Purge pipe */
			blr					/* Bye, bye... */
notkernsp:	mfspr	r5,sdr1				/* Get hash table base and size */
			rlwimi	r11,r4,30,2,5			/* Insert the segment no. to make a VSID */
			rlwimi	r12,r5,16,0,15			/* Make table size -1 out of mask */
			rlwinm	r7,r4,26,10,25			/* Isolate the page index */
			andc	r5,r5,r12			/* Clean up the hash table */
			xor	r7,r7,r11			/* Get primary hash */
			rlwinm	r11,r11,1,1,24			/* Position VSID for pte ID */
			and	r7,r7,r12			/* Wrap the hash */
			rlwimi	r11,r4,10,26,31			/* Move API into pte ID */
			add	r5,r7,r5			/* Point to the PTEG */
			oris	r11,r11,0x8000			/* Slam on valid bit so we don't match an invalid one */

			li	r9,8				/* Get the number of PTEs to check */
			lwz	r6,0(r5)			/* Preload the virtual half */

fndpte:		subi	r9,r9,1				/* Count the pte */
			lwz	r3,4(r5)			/* Get the real half */
			cmplw	cr1,r6,r11			/* Is this what we want? */
			lwz	r6,8(r5)			/* Start to get the next virtual half */
			mr.	r9,r9				/* Any more to try? */
			addi	r5,r5,8				/* Bump to next slot */
			beq	cr1,gotxlate			/* We found what we were looking for... */
			bne+	fndpte				/* Go try the next PTE... */

			mtmsr	r10				/* Restore state */
			li	r3,0				/* Show failure */
			isync					/* Purge pipe */
			blr					/* Leave... */

gotxlate:	mtmsr	r10				/* Restore state */
			rlwimi	r3,r4,0,20,31			/* Cram in the page displacement */
			isync					/* Purge pipe */
			blr					/* Leave... */
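/*
 *			Editor's sketch of the PTEG scan just done: eight slots, matched
 *			against the expected top word with the valid bit forced on:
 *
 *			unsigned int lra_scan(unsigned int *pteg, unsigned int match) {
 *				int i;
 *				for (i = 0; i < 8; i++)
 *					if (pteg[i * 2] == match)
 *						return pteg[i * 2 + 1];	// the real half
 *				return 0;				// failure is 0
 *			}
 */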
/*
 *			struct blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr)
 *
 *			This is used to add a block mapping entry to the MRU list whose top
 *			node is anchored at bmaps. This is a real address and is also used as
 *			the list lock.
 *
 *			Overlapping areas are not allowed. If we find one, we return its address and
 *			expect the upper layers to panic. We only check this for a debug build...
 */
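/*
 *			Editor's sketch of the overlap test done in abChk below: two
 *			inclusive ranges overlap unless one ends before the other begins:
 *
 *			int overlaps(unsigned int ns, unsigned int ne,
 *			             unsigned int os, unsigned int oe) {
 *				return !(ne < os || ns > oe);
 *			}
 */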
			.globl	EXT(hw_add_blk)

			mfsprg	r9,2				; Get feature flags
			lwz	r6,PMAP_PMAPVR(r3)		; Get the v to r translation
			mfmsr	r0				/* Save the MSR */
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			xor	r3,r3,r6			; Get real address of bmap anchor
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */
			la	r3,PMAP_BMAPS(r3)		; Point to bmap header

			bt	pfNoMSRirb,habNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

			li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR

abLck:		lwarx	r9,0,r3				; Get the block map anchor and lock
			rlwinm.	r8,r9,0,31,31			; Is it locked?
			ori	r8,r9,1				; Set the lock
			bne-	abLckw				; Yeah...
			stwcx.	r8,0,r3				; Lock the bmap list
			bne-	abLck				; Someone else was trying, try again...
			b	abSXg				; All done...

abLckw:		rlwinm.	r5,r9,0,31,31			; Check if it is still held
			beq+	abLck				; Not no more...
			lwz	r9,0(r3)			; Get lock word again...
			b	abLckw				; Check it out...

			nop					; Force ISYNC to last instruction in IFETCH

abSXg:		rlwinm	r11,r9,0,0,26			; Clear out flags and lock
			isync					; Make sure we have not used anything yet
			lwz	r7,bmstart(r4)			; Get start
			lwz	r8,bmend(r4)			; Get end
			mr	r2,r11				; Get chain

abChk:		mr.	r10,r2				; End of chain?
			beq	abChkD				; Yes, chain is ok...
			lwz	r5,bmstart(r10)			; Get start of current area
			lwz	r6,bmend(r10)			; Get end of current area

			cmplw	cr0,r8,r5			; Is the end of the new before the old?
			cmplw	cr1,r8,r6			; Is the end of the new after the old?
			cmplw	cr6,r6,r7			; Is the end of the old before the new?
			cror	cr1_eq,cr0_lt,cr1_gt		; Set cr1_eq if new not in old
			cmplw	cr7,r6,r8			; Is the end of the old after the new?
			lwz	r2,bmnext(r10)			; Get pointer to the next
			cror	cr6_eq,cr6_lt,cr7_gt		; Set cr6_eq if old not in new
			crand	cr1_eq,cr1_eq,cr6_eq		; Set cr1_eq if no overlap
			beq+	cr1,abChk			; Ok, check the next...

			stw	r9,0(r3)			; Unlock
			mtmsr	r0				; Restore xlation and rupts
			mr	r3,r10				; Pass back the overlap
			isync
			blr					; Return...

abChkD:		stw	r11,bmnext(r4)			; Chain this on in
			rlwimi	r4,r9,0,27,31			; Copy in locks and flags
			sync					; Make sure that is done

			stw	r4,0(r3)			; Unlock and chain the new first one
			mtmsr	r0				; Restore xlation and rupts
			li	r3,0				; Pass back a no-failure return code
			isync
			blr					; Return...
/*
 *			struct blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 *
 *			This is used to remove a block mapping entry from the list that
 *			is anchored at bmaps. bmaps is a virtual address and is also used as
 *			the list lock.
 *
 *			Note that this function removes a single block that contains
 *			any address within the range sva to eva (inclusive). To entirely
 *			clear any range, hw_rem_blk must be called repeatedly until it
 *			returns 0 (no block found).
 *
 *			The block is removed from the list and all hash table entries
 *			corresponding to the mapped block are invalidated and the TLB
 *			entries are purged. If the block is large, this could take
 *			quite a while. We need to hash every possible address in the
 *			range and lock down the PCA.
 *
 *			If we attempt to remove a permanent entry, we will not do it.
 *			The block address will be ORed with 1 and returned.
 */
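/*
 *			Editor's sketch of the calling convention just described
 *			(hypothetical caller):
 *
 *			for (;;) {
 *				struct blokmap *b = hw_rem_blk(pmap, sva, eva);
 *				if (b == 0)
 *					break;				// range is clear
 *				if ((unsigned int)b & 1)
 *					panic("hw_rem_blk: permanent block");	// refused
 *			}
 */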
			.globl	EXT(hw_rem_blk)

			mfsprg	r9,2				; Get feature flags
			lwz	r6,PMAP_PMAPVR(r3)		; Get the v to r translation
			mfmsr	r0				/* Save the MSR */
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			xor	r3,r3,r6			; Get real address of bmap anchor
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */
			la	r3,PMAP_BMAPS(r3)		; Point to the bmap chain head

			bt	pfNoMSRirb,hrbNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

			li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR

rbLck:		lwarx	r9,0,r3				; Get the block map anchor and lock
			rlwinm.	r8,r9,0,31,31			; Is it locked?
			ori	r8,r9,1				; Set the lock
			bne-	rbLckw				; Yeah...
			stwcx.	r8,0,r3				; Lock the bmap list
			bne-	rbLck				; Someone else was trying, try again...
			b	rbSXg				; All done...

rbLckw:		rlwinm.	r11,r9,0,31,31			; Check if it is still held
			beq+	rbLck				; Not no more...
			lwz	r9,0(r3)			; Get lock word again...
			b	rbLckw				; Check it out...

			nop					; Force ISYNC to last instruction in IFETCH

rbSXg:		rlwinm.	r2,r9,0,0,26			; Clear out flags and lock
			mr	r10,r3				; Keep anchor as previous pointer
			isync					; Make sure we have not used anything yet
			beq-	rbMT				; There is nothing in the list

rbChk:		mr	r12,r10				; Save the previous
			mr.	r10,r2				; End of chain?
			beq	rbMT				; Yes, nothing to do...
			lwz	r11,bmstart(r10)		; Get start of current area
			lwz	r6,bmend(r10)			; Get end of current area

			cmplw	cr0,r5,r11			; Is the end of range before the start of the area?
			cmplw	cr1,r4,r6			; Is the start of range after the end of the area?
			cror	cr1_eq,cr0_lt,cr1_gt		; Set cr1_eq if not in range
			lwz	r2,bmnext(r10)			; Get the next one
			beq+	cr1,rbChk			; Not this one, check the next...

			lwz	r8,blkFlags(r10)		; Get the flags

			cmplw	cr1,r12,r3			; Are we deleting the first one?
			rlwinm.	r8,r8,0,blkPermbit,blkPermbit	; Is this a permanent block?
			bne	cr1,rbnFirst			; Nope...
			rlwimi	r9,r2,0,0,26			; Change the lock value
			ori	r2,r9,1				; Turn on the lock bit

rbnFirst:	bne-	rbPerm				; This is permanent, do not remove...
			lwz	r8,bmspace(r10)			; Get the VSID
			stw	r2,bmnext(r12)			; Unchain us

			stw	r9,0(r3)			; Unlock and chain the new first one

			eqv	r4,r4,r4			; Fill the bottom with foxes
			mfspr	r12,sdr1			; Get hash table base and size
			rlwinm	r8,r8,6,0,25			; Align VSID to PTEG
			rlwimi	r4,r12,16,0,15			; Make table size - 1 out of mask
			andc	r12,r12,r4			; Clean up address of hash table
			rlwinm	r5,r11,26,6,25			; Rotate virtual start address into PTEG units
			add	r12,r12,r4			; Point to PCA - 1
			rlwinm	r6,r6,26,6,25			; Rotate virtual end address into PTEG units
			addi	r12,r12,1			; Point to PCA base
			sub	r6,r6,r5			; Get the total number of PTEGs to clear
			cmplw	r6,r4				; See if this wraps all the way around
			blt	rbHash				; Nope, length is right
			subi	r6,r4,32+31			; Back down to correct length
rbHash:		rlwinm	r5,r5,0,10,25			; Keep only the page index
			xor	r2,r8,r5			; Hash into table
			and	r2,r2,r4			; Wrap into the table
			add	r2,r2,r12			; Point right at the PCA

rbLcka:		lwarx	r7,0,r2				; Get the PTEG lock
			mr.	r7,r7				; Is it locked?
			bne-	rbLckwa				; Yeah...
			li	r7,1				; Get the locked value
			stwcx.	r7,0,r2				; Take it
			bne-	rbLcka				; Someone else was trying, try again...
			b	rbSXga				; All done...

rbLckwa:	mr.	r7,r7				; Check if it is already held
			beq+	rbLcka				; It is clear...
			lwz	r7,0(r2)			; Get lock word again...
			b	rbLckwa				; Check it out...
rbSXga:		isync					; Make sure nothing used yet
			lwz	r7,PCAallo(r2)			; Get the allocation word
			rlwinm.	r11,r7,8,0,7			; Isolate the autogenerated PTEs
			or	r7,r7,r11			; Release the autogen slots
			beq+	rbAintNone			; There are not any here
			mtcrf	0xC0,r11			; Set the branch masks for autogens
			sub	r11,r2,r4			; Move back to the hash table + 1
			rlwinm	r7,r7,0,16,7			; Clear the autogen field
			subi	r11,r11,1			; Point to the PTEG
			stw	r7,PCAallo(r2)			; Update the flags
			li	r7,0				; Get an invalid PTE value

			bf	0,rbSlot1			; No autogen here
			stw	r7,0x00(r11)			; Invalidate PTE
rbSlot1:	bf	1,rbSlot2			; No autogen here
			stw	r7,0x08(r11)			; Invalidate PTE
rbSlot2:	bf	2,rbSlot3			; No autogen here
			stw	r7,0x10(r11)			; Invalidate PTE
rbSlot3:	bf	3,rbSlot4			; No autogen here
			stw	r7,0x18(r11)			; Invalidate PTE
rbSlot4:	bf	4,rbSlot5			; No autogen here
			stw	r7,0x20(r11)			; Invalidate PTE
rbSlot5:	bf	5,rbSlot6			; No autogen here
			stw	r7,0x28(r11)			; Invalidate PTE
rbSlot6:	bf	6,rbSlot7			; No autogen here
			stw	r7,0x30(r11)			; Invalidate PTE
rbSlot7:	bf	7,rbSlotx			; No autogen here
			stw	r7,0x38(r11)			; Invalidate PTE
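/*
 *			Editor's sketch of the unrolled clearing above: each bit of the
 *			autogen byte selects one of the 8 PTEs in this PTEG, and clearing
 *			the top word (valid bit included) invalidates the slot:
 *
 *			for (i = 0; i < 8; i++)
 *				if (automask & (0x80 >> i))
 *					pteg[i * 2] = 0;	// clear top word, V goes off
 */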
rbSlotx:
rbAintNone:	li	r7,0				; Clear this out
			sync					; To make SMP happy
			addic.	r6,r6,-64			; Decrement the count
			stw	r7,PCAlock(r2)			; Release the PTEG lock
			addi	r5,r5,64			; Move up by adjusted page number
			bge+	rbHash				; Not done...

			sync					; Make sure the memory is quiet

;
;			Here we take the easy way out and just purge the entire TLB. This is
;			certainly faster and definitely easier than blasting just the correct ones
;			in the range; we only need one lock and one TLBSYNC. We would hope
;			that most blocks are more than 64 pages (256K) and on every machine
;			up to Book E, 64 TLBIEs will invalidate the entire table.
;
			li	r5,64				; Get number of TLB entries to purge
			lis	r12,HIGH_ADDR(EXT(tlb_system_lock))	; Get the TLBIE lock
			li	r6,0				; Start at 0
			ori	r12,r12,LOW_ADDR(EXT(tlb_system_lock))	; Grab up the bottom part

rbTlbL:		lwarx	r2,0,r12			; Get the TLBIE lock
			mr.	r2,r2				; Is it locked?
			li	r2,1				; Get our lock value
			bne-	rbTlbL				; It is locked, go wait...
			stwcx.	r2,0,r12			; Try to get it
			bne-	rbTlbL				; We was beat...

rbTlbN:		addic.	r5,r5,-1			; See if we did them all
			tlbie	r6				; Invalidate it everywhere
			addi	r6,r6,0x1000			; Up to the next page
			bgt+	rbTlbN				; Make sure we have done it all...

			mfspr	r5,pvr				; Find out what kind of machine we are
			li	r2,0				; Lock clear value

			rlwinm	r5,r5,16,16,31			; Isolate CPU type
			cmplwi	r5,3				; Is this a 603?
			sync					; Make sure all is quiet
			beq-	rbits603a			; It is a 603, skip the tlbsync...

			eieio					; Make sure that the tlbie happens first
			tlbsync					; Wait for everyone to catch up

rbits603a:	sync					; Wait for quiet again
			stw	r2,0(r12)			; Unlock invalidates

			sync					; Make sure that is done

			mtmsr	r0				; Restore xlation and rupts
			mr	r3,r10				; Pass back the removed block
			isync
			blr					; Return...

rbMT:		stw	r9,0(r3)			; Unlock
			mtmsr	r0				; Restore xlation and rupts
			li	r3,0				; Say we did not find one
			isync
			blr					; Return...

rbPerm:		stw	r9,0(r3)			; Unlock
			mtmsr	r0				; Restore xlation and rupts
			ori	r3,r10,1			; Say we did not remove it
			isync
			blr					; Return...
/*
 *			vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va)
 *
 *			This is used to translate a virtual address within a block mapping entry
 *			to a physical address. If not found, 0 is returned.
 */
			.globl	EXT(hw_cvp_blk)

			mfsprg	r9,2				; Get feature flags
			lwz	r6,PMAP_PMAPVR(r3)		; Get the v to r translation
			mfmsr	r0				/* Save the MSR */
			rlwinm	r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
			mtcrf	0x04,r9				; Set the features
			xor	r3,r3,r6			; Get real address of bmap anchor
			rlwinm	r12,r12,0,28,25			/* Clear IR and DR */
			la	r3,PMAP_BMAPS(r3)		; Point to chain header

			bt	pfNoMSRirb,hcbNoMSR		; No MSR...

			mtmsr	r12				; Translation and all off
			isync					; Toss prefetch

			li	r0,loadMSR			; Get the MSR setter SC
			mr	r3,r12				; Get new MSR

cbLck:		lwarx	r9,0,r3				; Get the block map anchor and lock
			rlwinm.	r8,r9,0,31,31			; Is it locked?
			ori	r8,r9,1				; Set the lock
			bne-	cbLckw				; Yeah...
			stwcx.	r8,0,r3				; Lock the bmap list
			bne-	cbLck				; Someone else was trying, try again...
			b	cbSXg				; All done...

cbLckw:		rlwinm.	r5,r9,0,31,31			; Check if it is still held
			beq+	cbLck				; Not no more...
			lwz	r9,0(r3)			; Get lock word again...
			b	cbLckw				; Check it out...

			nop					; Force ISYNC to last instruction in IFETCH

cbSXg:		rlwinm.	r11,r9,0,0,26			; Clear out flags and lock
			li	r2,0				; Assume we do not find anything
			isync					; Make sure we have not used anything yet
cbChk:		mr.	r11,r11				; Is there more?
			beq-	cbDone				; No more...
			lwz	r5,bmstart(r11)			; Get the bottom of range
			lwz	r12,bmend(r11)			; Get the top of range
			cmplw	cr0,r4,r5			; Are we before the entry?
			cmplw	cr1,r4,r12			; Are we after the entry?
			cror	cr1_eq,cr0_lt,cr1_gt		; Set cr1_eq if not in range
			beq-	cr1,cbNo			; We are not in the range...

			lwz	r2,bmPTEr(r11)			; Get the real part of the PTE
			sub	r5,r4,r5			; Get offset into area
			rlwinm	r2,r2,0,0,19			; Clean out everything but the page
			add	r2,r2,r5			; Adjust the real address

cbDone:		stw	r9,0(r3)			; Unlock it, we are done with it (no sync needed)
			mtmsr	r0				; Restore translation and interrupts...
			isync					; Make sure it is on
			mr	r3,r2				; Set return physical address
			blr					; Leave...

cbNo:		lwz	r11,bmnext(r11)			; Link to the next
			b	cbChk				; Check it out...
/*
 *			hw_set_user_space(pmap)
 *			hw_set_user_space_dis(pmap)
 *
 *			Indicate whether memory space needs to be switched.
 *			We really need to turn off interrupts here, because we need to be non-preemptable
 *
 *			hw_set_user_space_dis is used when interruptions are already disabled. Mind the
 *			register usage here. The VMM switch code in vmachmon.s that calls this
 *			knows what registers are in use. Check that code if these change.
 */
			.globl	EXT(hw_set_user_space)

LEXT(hw_set_user_space)

			mfmsr	r10				/* Get the current MSR */
			rlwinm	r9,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Turn off 'rupts */
			mtmsr	r9				/* Disable 'em */
			lwz	r7,PMAP_PMAPVR(r3)		; Get the v to r translation
			lwz	r4,PMAP_SPACE(r3)		; Get the space
			mfsprg	r6,0				/* Get the per_proc_info address */
			xor	r3,r3,r7			; Get real address of the pmap
			stw	r4,PP_USERSPACE(r6)		/* Show our new address space */
			stw	r3,PP_USERPMAP(r6)		; Show our real pmap address
			mtmsr	r10				/* Restore interruptions */
			blr					/* Return... */

			.globl	EXT(hw_set_user_space_dis)

LEXT(hw_set_user_space_dis)

			lwz	r7,PMAP_PMAPVR(r3)		; Get the v to r translation
			lwz	r4,PMAP_SPACE(r3)		; Get the space
			mfsprg	r6,0				; Get the per_proc_info address
			xor	r3,r3,r7			; Get real address of the pmap
			stw	r4,PP_USERSPACE(r6)		; Show our new address space
			stw	r3,PP_USERPMAP(r6)		; Show our real pmap address
			blr					; Return...
/*			struct mapping *hw_cpv(struct mapping *mp) - Converts a physical mapping CB address to virtual */

			rlwinm.	r4,r3,0,0,19			; Round back to the mapping block allocation control block
			mfmsr	r10				; Get the current MSR
			beq-	hcpvret				; Skip if we are passed a 0...
			andi.	r9,r10,0x7FEF			; Turn off interrupts and data translation
			mtmsr	r9				; Disable DR and EE

			lwz	r4,mbvrswap(r4)			; Get the conversion value
			mtmsr	r10				; Interrupts and DR back on

			xor	r3,r3,r4			; Convert to virtual

hcpvret:	rlwinm	r3,r3,0,0,26			; Clean out any flags
			blr					; Leave...
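/*
 *			Editor's sketch of the v<->r trick used by hw_cpv and hw_cvp: each
 *			mapping block stores the XOR of its virtual and real addresses
 *			(mbvrswap), so the same xor converts in either direction:
 *
 *			unsigned int mb = (unsigned int)mp & 0xFFFFF000;	// owning block
 *			return ((unsigned int)mp & ~0x1F) ^ mbvrswap(mb);	// flip address space
 *
 *			(mbvrswap(mb) here stands for the mbvrswap field of that block.)
 */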
/*			struct mapping *hw_cvp(struct mapping *mp) - Converts a virtual mapping CB address to physical
 *
 *			Translation must be on for this
 */

			rlwinm	r4,r3,0,0,19			; Round back to the mapping block allocation control block
			rlwinm	r3,r3,0,0,26			; Clean out any flags
			lwz	r4,mbvrswap(r4)			; Get the conversion value
			xor	r3,r3,r4			; Convert to physical
			blr					; Leave...
/*			int mapalc(struct mappingblok *mb) - Finds, allocates, and checks a free mapping entry in a block
 *
 *			Lock must already be held on mapping block list
 *			returns 0 if all slots are filled.
 *			returns n if a slot is found and it is not the last
 *			returns -n if a slot is found and it is the last
 *			when n and -n are returned, the corresponding bit is cleared
 */
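/*
 *			Editor's sketch of the scan below: four 32-bit free masks are
 *			searched with cntlzw; the winning bit is cleared, and the sign of
 *			the result says whether it was the last free slot (slot 0 holds
 *			the block header, so a real allocation is never bit 0):
 *
 *			int mapalc(unsigned int free[4]) {
 *				int w, n;
 *				for (w = 0; w < 4; w++) {
 *					if (free[w] == 0)
 *						continue;
 *					n = __builtin_clz(free[w]) + 32 * w;
 *					free[w] &= ~(0x80000000u >> (n & 31));	// allocate it
 *					if (free[0] | free[1] | free[2] | free[3])
 *						return n;	// more remain
 *					return -n;		// we took the last one
 *				}
 *				return 0;			// all slots filled
 *			}
 */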
			lwz	r4,mbfree(r3)			; Get the first mask
			lis	r0,0x8000			; Get the mask to clear the first free bit
			lwz	r5,mbfree+4(r3)			; Get the second mask
			mr	r12,r3				; Save the return
			cntlzw	r8,r4				; Get first free field
			lwz	r6,mbfree+8(r3)			; Get the third mask
			srw.	r9,r0,r8			; Get bit corresponding to first free one
			lwz	r7,mbfree+12(r3)		; Get the fourth mask
			cntlzw	r10,r5				; Get first free field in second word
			andc	r4,r4,r9			; Turn it off
			bne	malcfnd0			; Found one...

			srw.	r9,r0,r10			; Get bit corresponding to first free one in second word
			cntlzw	r11,r6				; Get first free field in third word
			andc	r5,r5,r9			; Turn it off
			bne	malcfnd1			; Found one...

			srw.	r9,r0,r11			; Get bit corresponding to first free one in third word
			cntlzw	r10,r7				; Get first free field in fourth word
			andc	r6,r6,r9			; Turn it off
			bne	malcfnd2			; Found one...

			srw.	r9,r0,r10			; Get bit corresponding to first free one in fourth word
			li	r3,0				; Assume abject failure
			andc	r7,r7,r9			; Turn it off
			beqlr					; There are none left...

			addi	r3,r10,96			; Set the correct bit number
			stw	r7,mbfree+12(r12)		; Actually allocate the slot

mapafin:	or	r4,r4,r5			; Merge the first two allocation maps
			or	r6,r6,r7			; Then the last two
			or.	r4,r4,r6			; Merge both halves
			bnelr+					; Return if some left for next time...

			neg	r3,r3				; Indicate we just allocated the last one
			blr					; Leave...

malcfnd0:	stw	r4,mbfree(r12)			; Actually allocate the slot
			mr	r3,r8				; Set the correct bit number
			b	mapafin				; Exit now...

malcfnd1:	stw	r5,mbfree+4(r12)		; Actually allocate the slot
			addi	r3,r10,32			; Set the correct bit number
			b	mapafin				; Exit now...

malcfnd2:	stw	r6,mbfree+8(r12)		; Actually allocate the slot
			addi	r3,r11,64			; Set the correct bit number
			b	mapafin				; Exit now...
/*
 *			Log out all memory usage
 */

			mfmsr	r2				; Get the MSR
			lis	r10,hi16(EXT(DebugWork))	; High part of area
			lis	r12,hi16(EXT(mem_actual))	; High part of actual
			andi.	r0,r2,0x7FCF			; Interrupts and translation off
			ori	r10,r10,lo16(EXT(DebugWork))	; Get the entry
			mtmsr	r0				; Turn stuff off
			ori	r12,r12,lo16(EXT(mem_actual))	; Get the actual

			stw	r0,4(r10)			; Force logging off
			lwz	r0,0(r12)			; Get the end of memory

			lis	r12,hi16(EXT(mem_size))		; High part of defined memory
			ori	r12,r12,lo16(EXT(mem_size))	; Low part of defined memory
			lwz	r12,0(r12)			; Make it end of defined

			cmplw	r0,r12				; Is there room for the data?
			ble-	logmemexit			; No, do not even try...

			stw	r12,0(r12)			; Set defined memory size
			stw	r0,4(r12)			; Set the actual amount of memory

			lis	r3,hi16(EXT(hash_table_base))	; Hash table address
			lis	r4,hi16(EXT(hash_table_size))	; Hash table size
			lis	r5,hi16(EXT(pmap_mem_regions))	; Memory regions
			lis	r6,hi16(EXT(mapCtl))		; Mappings
			ori	r3,r3,lo16(EXT(hash_table_base))
			ori	r4,r4,lo16(EXT(hash_table_size))
			ori	r5,r5,lo16(EXT(pmap_mem_regions))
			ori	r6,r6,lo16(EXT(mapCtl))

			lwz	r5,4(r5)			; Get the pointer to the phys_ent table
			lwz	r6,0(r6)			; Get the pointer to the current mapping block
			stw	r3,8(r12)			; Save the hash table address
			stw	r4,12(r12)			; Save the hash table size
			stw	r5,16(r12)			; Save the physent pointer
			stw	r6,20(r12)			; Save the mappings

			addi	r11,r12,0x1000			; Point to area to move hash table and PCA

			add	r4,r4,r4			; Double size for both

copyhash:	lwz	r7,0(r3)			; Copy both of them

			rlwinm	r4,r12,20,12,31			; Get number of phys_ents

copyphys:	lwz	r7,0(r5)			; Copy physents

			addi	r11,r11,4095			; Round up to next page
			rlwinm	r11,r11,0,0,19			; Clip to page boundary

			lwz	r4,4(r6)			; Get the size of the mapping area

copymaps:	lwz	r7,0(r6)			; Copy the mappings

			sub	r11,r11,r12			; Get the total length we saved
			stw	r11,24(r12)			; Save the size

logmemexit:	mtmsr	r2				; Back to normal