2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
25 #include <db_machine_commands.h>
28 #include <mach_debug.h>
30 #include <ppc/proc_reg.h>
31 #include <ppc/exception.h>
32 #include <ppc/Performance.h>
33 #include <ppc/exception.h>
34 #include <ppc/pmap_internals.h>
35 #include <mach/ppc/vm_param.h>
42 * Random notes and musings...
44 * Access to mappings via the PTEG hash must be done with the list locked.
45 * Access via the physical entries is controlled by the physent lock.
46 * Access to mappings is controlled by the PTEG lock once they are queued.
47 * If they are not on the list, they don't really exist, so
48 * only one processor at a time can find them, so no access control is needed.
50 * The second half of the PTE is kept in the physical entry. It is done this
51 * way, because there may be multiple mappings that refer to the same physical
52 * page (i.e., address aliases or synonyms). We must do it this way, because
53 * maintenance of the reference and change bits becomes nightmarish if each mapping
54 * has its own. One side effect of this, and not necessarily a bad one, is that
55 * all mappings for a single page can have a single WIMG, protection state, and RC bits.
56 * The only "bad" thing, is the reference bit. With a single copy, we can not get
57 * a completely accurate working set calculation, i.e., we can't tell which mapping was
58 * used to reference the page, all we can tell is that the physical page was
61 * The master copies of the reference and change bits are kept in the phys_entry.
62 * Other than the reference and change bits, changes to the phys_entry are not
63 * allowed if it has any mappings. The master reference and change bits must be
64 * changed via atomic update.
66 * Invalidating a PTE merges the RC bits into the phys_entry.
68 * Before checking the reference and/or change bits, ALL mappings to the physical page are
71 * PTEs are never explicitly validated, they are always faulted in. They are also
72 * not visible outside of the hw_vm modules. Complete separation of church and state.
74 * Removal of a mapping invalidates its PTE.
76 * So, how do we deal with mappings to I/O space? We don't have a physent for it.
77 * Within the mapping is a copy of the second half of the PTE. This is used
78 * ONLY when there is no physical entry. It is swapped into the PTE whenever
79 * it is built. There is no need to swap it back out, because RC is not
80 * maintained for these mappings.
82 * So, I'm starting to get concerned about the number of lwarx/stcwx loops in
83 * this. Satisfying a mapped address with no stealing requires one lock. If we
84 * steal an entry, there's two locks and an atomic update. Invalidation of an entry
85 * takes one lock and, if there is a PTE, another lock and an atomic update. Other
86 * operations are multiples (per mapping) of the above. Maybe we should look for
87 * an alternative. So far, I haven't found one, but I haven't looked hard.
91 /* hw_add_map(struct mapping *mp, space_t space, vm_offset_t va) - Adds a mapping
93 * Adds a mapping to the PTEG hash list.
95 * Interrupts must be disabled before calling.
97 * Using the space and the virtual address, we hash into the hash table
98 * and get a lock on the PTEG hash chain. Then we chain the
99 * mapping to the front of the list.
104 .globl EXT(hw_add_map)
108 #if PERFTIMES && DEBUG
112 bl EXT(dbgLog2) ; Start of hw_add_map
117 mfmsr r0 /* Get the MSR */
118 eqv r6,r6,r6 /* Fill the bottom with foxes */
119 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
120 rlwinm r11,r4,6,6,25 /* Position the space for the VSID */
121 mfspr r10,sdr1 /* Get hash table base and size */
122 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
123 rlwimi r11,r5,30,2,5 /* Insert the segment no. to make a VSID */
124 mfsprg r12,2 ; Get feature flags
125 rlwimi r6,r10,16,0,15 /* Make table size -1 out of mask */
126 rlwinm r7,r5,26,10,25 /* Isolate the page index */
127 or r8,r10,r6 /* Point to the last byte in table */
128 rlwinm r9,r5,4,0,3 ; Move nybble 1 up to 0
129 xor r7,r7,r11 /* Get primary hash */
130 mtcrf 0x04,r12 ; Set the features
131 andi. r12,r0,0x7FCF /* Disable translation and interruptions */
132 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
133 addi r8,r8,1 /* Point to the PTEG Control Area */
134 xor r9,r9,r5 ; Splooch vaddr nybble 0 and 1 together
135 and r7,r7,r6 /* Wrap the hash */
136 rlwimi r11,r5,10,26,31 /* Move API into pte ID */
137 rlwinm r9,r9,6,27,29 ; Get splooched bits in place
138 add r8,r8,r7 /* Point to our PCA entry */
139 rlwinm r10,r4,2,27,29 ; Get low 3 bits of the VSID for look-aside hash
141 bt pfNoMSRirb,hamNoMSR ; No MSR...
143 mtmsr r12 ; Translation and all off
144 isync ; Toss prefetch
147 hamNoMSR: mr r4,r0 ; Save R0
149 li r0,loadMSR ; Get the MSR setter SC
150 mr r3,r12 ; Get new MSR
156 la r4,PCAhash(r8) /* Point to the mapping hash area */
157 xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID
158 isync /* Get rid of anything prefetched before we ref storage */
160 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
161 * and word 0 of the PTE (the virtual part).
163 * Now, we just lock the PCA.
166 li r12,1 /* Get the locked value */
167 dcbt 0,r4 /* We'll need the hash area in a sec, so get it */
168 add r4,r4,r9 /* Point to the right mapping hash slot */
170 ptegLckx: lwarx r10,0,r8 /* Get the PTEG lock */
171 mr. r10,r10 /* Is it locked? */
172 bne- ptegLckwx /* Yeah... */
173 stwcx. r12,0,r8 /* Take take it */
174 bne- ptegLckx /* Someone else was trying, try again... */
175 b ptegSXgx /* All done... */
179 ptegLckwx: mr. r10,r10 /* Check if it's already held */
180 beq+ ptegLckx /* It's clear... */
181 lwz r10,0(r8) /* Get lock word again... */
182 b ptegLckwx /* Wait... */
186 ptegSXgx: isync /* Make sure we haven't used anything yet */
188 lwz r7,0(r4) /* Pick up the anchor of hash list */
189 stw r3,0(r4) /* Save the new head */
190 stw r7,mmhashnext(r3) /* Chain in the old head */
192 stw r4,mmPTEhash(r3) /* Point to the head of the hash list */
194 sync /* Make sure the chain is updated */
195 stw r10,0(r8) /* Unlock the hash list */
196 mtmsr r0 /* Restore translation and interruptions */
197 isync /* Toss anything done with DAT off */
198 #if PERFTIMES && DEBUG
202 bl EXT(dbgLog2) ; end of hw_add_map
209 /* mp=hw_lock_phys_vir(space, va) - Finds and locks a physical entry by vaddr.
211 * Returns the mapping with the associated physent locked if found, or a
212 * zero and no lock if not. If we timed out trying to get the lock on
213 * the physical entry, we return a 1. A physical entry can never be on an
214 * odd boundary, so we can distinguish between a mapping and a timeout code.
216 * Interrupts must be disabled before calling.
218 * Using the space and the virtual address, we hash into the hash table
219 * and get a lock on the PTEG hash chain. Then we search the chain for the
220 * mapping for our virtual address. From there, we extract the pointer to
221 * the physical entry.
223 * Next comes a bit of monkey business. we need to get a lock on the physical
224 * entry. But, according to our rules, we can't get it after we've gotten the
225 * PTEG hash lock, we could deadlock if we do. So, we need to release the
226 * hash lock. The problem is, though, that as soon as we release it, some
227 * other yahoo may remove our mapping between the time that we release the
228 * hash lock and obtain the phys entry lock. So, we can't count on the
229 * mapping once we release the lock. Instead, after we lock the phys entry,
230 * we search the mapping list (phys_link) for our translation. If we don't find it,
231 * we unlock the phys entry, bail out, and return a 0 for the mapping address. If we
232 * did find it, we keep the lock and return the address of the mapping block.
234 * What happens when a mapping is found, but there is no physical entry?
235 * This is what happens when there is I/O area mapped. If one of these mappings
236 * is found, the mapping is returned, as is usual for this call, but we don't
237 * try to lock anything. There could possibly be some problems here if another
238 * processor releases the mapping while we are still using it. Hope this
239 * ain't gonna happen.
241 * Taaa-dahhh! Easy as pie, huh?
243 * So, we have a few hacks for running translate off in here.
244 * First, when we call the lock routine, we have carnal knowledge of the registers it uses.
245 * That way, we don't need a stack frame, which we can't have 'cause the stack is in
246 * virtual storage. But wait, as if that's not enough... We need one more register. So,
247 * we cram the LR into the CTR and return from there.
; mp = hw_lock_phys_vir(space, va)
; On entry: r3 = space ID, r4 = virtual address.
; Hashes to the PTEG, takes the PCA lock, searches the hash chain for the
; mapping, then (per the locking rules described above) drops the PCA lock,
; takes the physent lock via hw_lock_bit, and re-verifies the mapping is
; still on the chain.  Returns (in r3): the mapping address with the physent
; locked, 0 if not found, or 1 on lock timeout.  Runs translate-off; the
; return address is parked in CTR because no stack frame can be used.
; NOTE(review): some interior lines (e.g. the findmapa loop head and the
; loadMSR sc) are missing from this extract; code is unchanged here.
251 .globl EXT(hw_lock_phys_vir)
253 LEXT(hw_lock_phys_vir)
255 #if PERFTIMES && DEBUG
259 bl EXT(dbgLog2) ; Start of hw_lock_phys_vir
263 mfmsr r12 /* Get the MSR */
264 eqv r6,r6,r6 /* Fill the bottom with foxes */
265 mfsprg r9,2 ; Get feature flags
266 rlwinm r11,r3,6,6,25 /* Position the space for the VSID */
267 rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
268 mfspr r5,sdr1 /* Get hash table base and size */
269 rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */
270 mtcrf 0x04,r9 ; Set the features
271 rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
272 rlwimi r6,r5,16,0,15 /* Make table size -1 out of mask */
273 andi. r0,r12,0x7FCF /* Disable translation and interruptions */
274 rlwinm r9,r4,4,0,3 ; Move nybble 1 up to 0
275 rlwinm r7,r4,26,10,25 /* Isolate the page index */
276 or r8,r5,r6 /* Point to the last byte in table */
277 xor r7,r7,r11 /* Get primary hash */
278 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
279 addi r8,r8,1 /* Point to the PTEG Control Area */
280 xor r9,r9,r4 ; Splooch vaddr nybble 0 and 1 together
281 and r7,r7,r6 /* Wrap the hash */
282 rlwimi r11,r4,10,26,31 /* Move API into pte ID */
283 rlwinm r9,r9,6,27,29 ; Get splooched bits in place
284 add r8,r8,r7 /* Point to our PCA entry */
285 rlwinm r10,r3,2,27,29 ; Get low 3 bits of the VSID for look-aside hash
287 bt pfNoMSRirb,hlpNoMSR ; No MSR...
289 mtmsr r0 ; Translation and all off
290 isync ; Toss prefetch
293 hlpNoMSR: mr r3,r0 ; Get the new MSR
294 li r0,loadMSR ; Get the MSR setter SC
298 la r3,PCAhash(r8) /* Point to the mapping hash area */
299 xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID
300 isync /* Make sure translation is off before we ref storage */
303 * We've now got the address of our PCA, the hash chain anchor, our API subhash,
304 * and word 0 of the PTE (the virtual part).
306 * Now, we just lock the PCA and find our mapping, if it exists.
309 dcbt 0,r3 /* We'll need the hash area in a sec, so get it */
310 add r3,r3,r9 /* Point to the right mapping hash slot */
; PTEG lock spin: lwarx/stwcx. attempt, with a plain-load backoff loop.
312 ptegLcka: lwarx r10,0,r8 /* Get the PTEG lock */
313 li r5,1 /* Get the locked value */
314 mr. r10,r10 /* Is it locked? */
315 bne- ptegLckwa /* Yeah... */
316 stwcx. r5,0,r8 /* Try to take it */
317 bne- ptegLcka /* Someone else was trying, try again... */
318 b ptegSXga /* All done... */
322 ptegLckwa: mr. r10,r10 /* Check if it's already held */
323 beq+ ptegLcka /* It's clear... */
324 lwz r10,0(r8) /* Get lock word again... */
325 b ptegLckwa /* Wait... */
329 ptegSXga: isync /* Make sure we haven't used anything yet */
331 mflr r0 /* Get the LR */
332 lwz r9,0(r3) /* Pick up the first mapping block */
333 mtctr r0 /* Stuff it into the CTR (no stack frame; we return via bctr) */
337 mr. r3,r9 /* Did we hit the end? */
338 bne+ chkmapa /* Nope... */
340 stw r3,0(r8) /* Unlock the PTEG lock
341 Note: we never saved anything while we
342 had the lock, so we don't need a sync
343 before we unlock it */
; Common exit: r3 already holds the return value (mapping, 0, or 1).
345 vbail: mtmsr r12 /* Restore translation and interruptions */
346 isync /* Make sure translation is cool */
347 #if PERFTIMES && DEBUG
351 bl EXT(dbgLog2) ; End of hw_lock_phys_vir
355 bctr /* Return via CTR (LR was stashed there above) */
359 chkmapa: lwz r10,mmPTEv(r3) /* Pick up our virtual ID */
360 lwz r9,mmhashnext(r3) /* Pick up next mapping block */
361 cmplw r10,r11 /* Have we found ourself? */
362 bne- findmapa /* Nope, still wandering... */
364 lwz r9,mmphysent(r3) /* Get our physical entry pointer */
365 li r5,0 /* Clear this out */
366 mr. r9,r9 /* Is there, like, a physical entry? */
367 stw r5,0(r8) /* Unlock the PTEG lock
368 Note: we never saved anything while we
369 had the lock, so we don't need a sync
370 before we unlock it */
372 beq- vbail /* If there is no physical entry, it's time
375 /* Here we want to call hw_lock_bit. We don't want to use the stack, 'cause it's
376 * in virtual storage, and we're in real. So, we've carefully looked at the code
377 * in hw_lock_bit (and unlock) and cleverly don't use any of the registers that it uses.
378 * Be very, very aware of how you change this code. By the way, it uses:
379 * R0, R6, R7, R8, and R9. R3, R4, and R5 contain parameters
380 * Unfortunately, we need to stash R9 still. So... Since we know we will not be interrupted
381 * ('cause we turned off interruptions and translation is off) we will use SPRG3...
384 lwz r10,mmPTEhash(r3) /* Save the head of the hash-alike chain. We need it to find ourselves later */
385 lis r5,HIGH_ADDR(EXT(LockTimeOut)) /* Get address of timeout value */
386 la r3,pephyslink(r9) /* Point to the lock word */
387 ori r5,r5,LOW_ADDR(EXT(LockTimeOut)) /* Get second half of address */
388 li r4,PHYS_LOCK /* Get the lock bit value */
389 lwz r5,0(r5) /* Pick up the timeout value */
390 mtsprg 3,r9 /* Save R9 in SPRG3 */
392 bl EXT(hw_lock_bit) /* Go do the lock */
394 mfsprg r9,3 /* Restore pointer to the phys_entry */
395 mr. r3,r3 /* Did we timeout? */
396 lwz r4,pephyslink(r9) /* Pick up first mapping block */
397 beq- penterr /* Bad deal, we timed out... */
399 rlwinm r4,r4,0,0,26 ; Clear out the flags from first link
; Re-verify: walk the physent's mapping list looking for ourselves, since
; the mapping could have been removed while the PCA lock was dropped.
401 findmapb: mr. r3,r4 /* Did we hit the end? */
402 bne+ chkmapb /* Nope... */
404 la r3,pephyslink(r9) /* Point to where the lock is */
405 li r4,PHYS_LOCK /* Get the lock bit value */
406 bl EXT(hw_unlock_bit) /* Go unlock the physentry */
408 li r3,0 /* Say we failed */
409 b vbail /* Return in abject failure... */
411 penterr: li r3,1 /* Set timeout */
412 b vbail /* Return in abject failure... */
416 chkmapb: lwz r6,mmPTEv(r3) /* Pick up our virtual ID */
417 lwz r4,mmnext(r3) /* Pick up next mapping block */
418 cmplw r6,r11 /* Have we found ourself? */
419 lwz r5,mmPTEhash(r3) /* Get the start of our hash chain */
420 bne- findmapb /* Nope, still wandering... */
421 cmplw r5,r10 /* On the same hash chain? */
422 bne- findmapb /* Nope, keep looking... */
424 b vbail /* Return in glorious triumph... */
428 * hw_rem_map(mapping) - remove a mapping from the system.
430 * Upon entry, R3 contains a pointer to a mapping block and the associated
431 * physical entry is locked if there is one.
433 * If the mapping entry indicates that there is a PTE entry, we invalidate
434 * it and merge the reference and change information into the phys_entry.
436 * Next, we remove the mapping from the phys_ent and the PTEG hash list.
438 * Unlock any locks that are left, and exit.
440 * Note that this must be done with both interruptions off and VM off
442 * Note that this code depends upon the VSID being of the format 00SXXXXX
443 * where S is the segment number.
; hw_rem_map(mapping)
; On entry: r3 = mapping block; its physent (if any) is already locked.
; Takes the PTEG lock, unchains the mapping from the PTEG hash list; if a
; PTE exists it is invalidated, tlbie'd (with tlbsync on non-603 CPUs), its
; slot freed in the PCA, and its RC bits merged into the phys_entry by
; atomic update.  Finally the mapping is unchained from the physent list.
; Runs with translation and interruptions off.
; NOTE(review): interior lines (lmvNoMSR label, trailing blr, etc.) are
; missing from this extract; code is unchanged here.
449 .globl EXT(hw_rem_map)
452 #if PERFTIMES && DEBUG
456 bl EXT(dbgLog2) ; Start of hw_rem_map
460 mfsprg r9,2 ; Get feature flags
461 mfmsr r0 /* Save the MSR */
462 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
463 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
464 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
465 mtcrf 0x04,r9 ; Set the features
466 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
468 bt pfNoMSRirb,lmvNoMSR ; No MSR...
470 mtmsr r12 ; Translation and all off
471 isync ; Toss prefetch
477 li r0,loadMSR ; Get the MSR setter SC
478 mr r3,r12 ; Get new MSR
486 lwz r6,mmPTEhash(r3) /* Get pointer to hash list anchor */
487 lwz r5,mmPTEv(r3) /* Get the VSID */
488 dcbt 0,r6 /* We'll need that chain in a bit */
490 rlwinm r7,r6,0,0,25 /* Round hash list down to PCA boundary */
491 li r12,1 /* Get the locked value */
492 subi r6,r6,mmhashnext /* Make the anchor look like an entry */
; PTEG lock spin: lwarx/stwcx. attempt plus plain-load wait loop.
494 ptegLck1: lwarx r10,0,r7 /* Get the PTEG lock */
495 mr. r10,r10 /* Is it locked? */
496 bne- ptegLckw1 /* Yeah... */
497 stwcx. r12,0,r7 /* Try to take it */
498 bne- ptegLck1 /* Someone else was trying, try again... */
499 b ptegSXg1 /* All done... */
503 ptegLckw1: mr. r10,r10 /* Check if it's already held */
504 beq+ ptegLck1 /* It's clear... */
505 lwz r10,0(r7) /* Get lock word again... */
506 b ptegLckw1 /* Wait... */
510 ptegSXg1: isync /* Make sure we haven't used anything yet */
512 lwz r12,mmhashnext(r3) /* Prime with our forward pointer */
513 lwz r4,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */
; Walk the hash chain to find the entry that points at us; hitting the end
; without finding ourselves is a fatal inconsistency (Choke).
515 srchmaps: mr. r10,r6 /* Save the previous entry */
516 bne+ mapok /* No error... */
518 lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
519 ori r0,r0,LOW_ADDR(Choke)
520 sc /* Firmware Heimlich maneuver */
524 mapok: lwz r6,mmhashnext(r6) /* Look at the next one */
525 cmplwi cr5,r4,0 /* Is there a PTE? */
526 cmplw r6,r3 /* Have we found ourselves? */
527 bne+ srchmaps /* Nope, get your head together... */
529 stw r12,mmhashnext(r10) /* Remove us from the queue */
530 rlwinm r9,r5,1,0,3 /* Move in the segment */
531 rlwinm r8,r4,6,4,19 /* Line PTEG disp up to a page */
532 rlwinm r11,r5,5,4,19 /* Line up the VSID */
533 lwz r10,mmphysent(r3) /* Point to the physical entry */
535 beq+ cr5,nopte /* There's no PTE to invalidate... */
537 xor r8,r8,r11 /* Back hash to virt index */
538 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
539 rlwimi r9,r5,22,4,9 /* Move in the API */
540 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
541 mfspr r11,pvr /* Find out what kind of machine we are */
542 rlwimi r9,r8,0,10,19 /* Create the virtual address */
543 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
545 stw r5,0(r4) /* Make the PTE invalid */
547 cmplwi cr1,r11,3 /* Is this a 603? */
548 sync /* Make sure the invalid is stored */
; Serialize tlbie system-wide: only one CPU may issue tlbie/tlbsync at a time.
550 tlbhang1: lwarx r5,0,r12 /* Get the TLBIE lock */
551 rlwinm r11,r4,29,29,31 /* Get the bit position of entry */
552 mr. r5,r5 /* Is it locked? */
553 lis r6,0x8000 /* Start up a bit mask */
554 li r5,1 /* Get our lock word */
555 bne- tlbhang1 /* It's locked, go wait... */
556 stwcx. r5,0,r12 /* Try to get it */
557 bne- tlbhang1 /* We was beat... */
559 srw r6,r6,r11 /* Make a "free slot" mask */
560 lwz r5,PCAallo(r7) /* Get the allocation control bits */
561 rlwinm r11,r6,24,8,15 /* Make the autogen bit to turn off */
562 or r5,r5,r6 /* turn on the free bit */
563 rlwimi r11,r11,24,16,23 /* Get lock bit mask to turn it off */
565 andc r5,r5,r11 /* Turn off the lock and autogen bits in allocation flags */
566 li r11,0 /* Lock clear value */
568 tlbie r9 /* Invalidate it everywhere */
571 beq- cr1,its603a /* It's a 603, skip the tlbsync... */
573 eieio /* Make sure that the tlbie happens first */
574 tlbsync /* wait for everyone to catch up */
577 its603a: sync /* Make sure of it all */
578 stw r11,0(r12) /* Clear the tlbie lock */
579 eieio /* Make sure those RC bit are loaded */
580 stw r5,PCAallo(r7) /* Show that the slot is free */
581 stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */
583 nopte: mr. r10,r10 /* See if there is a physical entry */
584 la r9,pephyslink(r10) /* Point to the physical mapping chain */
585 beq- nophys /* No physical entry, we're done... */
586 beq- cr5,nadamrg /* No PTE to merge... */
588 lwz r6,4(r4) /* Get the latest reference and change bits */
589 la r12,pepte1(r10) /* Point right at the master copy */
590 rlwinm r6,r6,0,23,24 /* Extract just the RC bits */
; Atomically OR the PTE's RC bits into the master copy in the phys_entry.
592 mrgrc: lwarx r8,0,r12 /* Get the master copy */
593 or r8,r8,r6 /* Merge in latest RC */
594 stwcx. r8,0,r12 /* Save it back */
595 bne- mrgrc /* If it changed, try again... */
597 nadamrg: li r11,0 /* Clear this out */
598 lwz r12,mmnext(r3) /* Prime with our next */
600 sync ; Make sure all is saved
602 stw r11,0(r7) /* Unlock the hash chain now so we don't
603 lock out another processor during
604 our next little search */
; Walk the physent's mapping list to unchain ourselves; again, not finding
; ourselves is fatal.
606 srchpmap: mr. r10,r9 /* Save the previous entry */
607 bne+ mapok1 /* No error... */
609 lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
610 ori r0,r0,LOW_ADDR(Choke)
611 sc /* Firmware Heimlich maneuver */
615 mapok1: lwz r9,mmnext(r9) /* Look at the next one */
616 rlwinm r8,r9,0,27,31 ; Save the flags (including the lock)
617 rlwinm r9,r9,0,0,26 ; Clear out the flags from first link
618 cmplw r9,r3 /* Have we found ourselves? */
619 bne+ srchpmap /* Nope, get your head together... */
621 rlwimi r12,r8,0,27,31 ; Insert the lock and flags
622 stw r12,mmnext(r10) /* Remove us from the queue */
624 mtmsr r0 /* Interrupts and translation back on */
626 #if PERFTIMES && DEBUG
629 bl EXT(dbgLog2) ; End of hw_rem_map
; Exit path for mappings with no physical entry: just unlock and restore.
636 nophys: li r4,0 /* Make sure this is 0 */
637 sync /* Make sure that chain is updated */
638 stw r4,0(r7) /* Unlock the hash chain */
639 mtmsr r0 /* Interrupts and translation back on */
641 #if PERFTIMES && DEBUG
644 bl EXT(dbgLog2) ; End of hw_rem_map
651 * hw_prot(physent, prot) - Change the protection of a physical page
653 * Upon entry, R3 contains a pointer to a physical entry which is locked.
654 * R4 contains the PPC protection bits.
656 * The first thing we do is to slam the new protection into the phys entry.
657 * Then we scan the mappings and process each one.
659 * Acquire the lock on the PTEG hash list for the mapping being processed.
661 * If the current mapping has a PTE entry, we invalidate
662 * it and merge the reference and change information into the phys_entry.
664 * Next, slam the protection bits into the entry and unlock the hash list.
666 * Note that this must be done with both interruptions off and VM off
; hw_prot(physent, prot)
; On entry: r3 = locked physical entry, r4 = PPC protection bits.
; First atomically slams the new protection into the master PTE copy in the
; phys_entry, then walks every mapping: takes its PTEG lock, invalidates and
; tlbie's any live PTE (freeing the slot and merging RC bits back into the
; phys_entry), updates the mapping's PTE copy, and unlocks.  Runs with
; translation and interruptions off.
; NOTE(review): the .globl/LEXT entry lines and hpNoMSR label are outside
; this extract; code is unchanged here.
675 #if PERFTIMES && DEBUG
681 bl EXT(dbgLog2) ; Start of hw_prot
685 mfsprg r9,2 ; Get feature flags
686 mfmsr r0 /* Save the MSR */
687 li r5,pepte1 /* Get displacement to the second word of master pte */
688 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
689 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
690 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
691 mtcrf 0x04,r9 ; Set the features
692 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
694 bt pfNoMSRirb,hpNoMSR ; No MSR...
696 mtmsr r12 ; Translation and all off
697 isync ; Toss prefetch
703 li r0,loadMSR ; Get the MSR setter SC
704 mr r3,r12 ; Get new MSR
712 lwz r10,pephyslink(r3) /* Get the first mapping block */
713 rlwinm r10,r10,0,0,26 ; Clear out the flags from first link
716 * Note that we need to do the interlocked update here because another processor
717 * can be updating the reference and change bits even though the physical entry
718 * is locked. All modifications to the PTE portion of the physical entry must be
719 * done via interlocked update.
722 protcng: lwarx r8,r5,r3 /* Get the master copy */
723 rlwimi r8,r4,0,30,31 /* Move in the protection bits */
724 stwcx. r8,r5,r3 /* Save it back */
725 bne- protcng /* If it changed, try again... */
; Per-mapping loop: r10 walks the physent's mapping chain.
729 protnext: mr. r10,r10 /* Are there any more mappings? */
730 beq- protdone /* Naw... */
732 lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */
733 lwz r5,mmPTEv(r10) /* Get the virtual address */
734 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
736 li r12,1 /* Get the locked value */
738 protLck1: lwarx r11,0,r7 /* Get the PTEG lock */
739 mr. r11,r11 /* Is it locked? */
740 bne- protLckw1 /* Yeah... */
741 stwcx. r12,0,r7 /* Try to take it */
742 bne- protLck1 /* Someone else was trying, try again... */
743 b protSXg1 /* All done... */
747 protLckw1: mr. r11,r11 /* Check if it's already held */
748 beq+ protLck1 /* It's clear... */
749 lwz r11,0(r7) /* Get lock word again... */
750 b protLckw1 /* Wait... */
754 protSXg1: isync /* Make sure we haven't used anything yet */
756 lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */
758 rlwinm r9,r5,1,0,3 /* Move in the segment */
759 lwz r2,mmPTEr(r10) ; Get the mapping copy of the PTE
760 mr. r6,r6 /* See if there is a PTE here */
761 rlwinm r8,r5,31,2,25 /* Line it up */
762 rlwimi r2,r4,0,30,31 ; Move protection bits into the mapping copy
764 beq+ protul /* There's no PTE to invalidate... */
766 xor r8,r8,r6 /* Back hash to virt index */
767 rlwimi r9,r5,22,4,9 /* Move in the API */
768 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
769 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
770 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
771 mfspr r11,pvr /* Find out what kind of machine we are */
772 rlwimi r9,r8,6,10,19 /* Create the virtual address */
773 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
775 stw r5,0(r6) /* Make the PTE invalid */
776 cmplwi cr1,r11,3 /* Is this a 603? */
777 sync /* Make sure the invalid is stored */
; Serialize tlbie system-wide via the TLBIE lock.
779 tlbhangp: lwarx r11,0,r12 /* Get the TLBIE lock */
780 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
781 mr. r11,r11 /* Is it locked? */
782 lis r5,0x8000 /* Start up a bit mask */
783 li r11,1 /* Get our lock word */
784 bne- tlbhangp /* It's locked, go wait... */
785 stwcx. r11,0,r12 /* Try to get it */
786 bne- tlbhangp /* We was beat... */
788 li r11,0 /* Lock clear value */
790 tlbie r9 /* Invalidate it everywhere */
792 beq- cr1,its603p /* It's a 603, skip the tlbsync... */
794 eieio /* Make sure that the tlbie happens first */
795 tlbsync /* wait for everyone to catch up */
798 its603p: stw r11,0(r12) /* Clear the lock */
799 srw r5,r5,r8 /* Make a "free slot" mask */
800 sync /* Make sure of it all */
802 lwz r6,4(r6) /* Get the latest reference and change bits */
803 stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */
804 rlwinm r6,r6,0,23,24 /* Extract the RC bits */
805 lwz r9,PCAallo(r7) /* Get the allocation control bits */
806 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
807 rlwimi r2,r6,0,23,24 ; Put the latest RC bit in mapping copy
808 or r9,r9,r5 /* Set the slot free */
809 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
810 andc r9,r9,r8 /* Clear the auto and lock bits */
811 li r5,pepte1 /* Get displacement to the second word of master pte */
812 stw r9,PCAallo(r7) /* Store the allocation controls */
; Atomically merge the just-read RC bits into the phys_entry master copy.
814 protmod: lwarx r11,r5,r3 /* Get the master copy */
815 or r11,r11,r6 /* Merge in latest RC */
816 stwcx. r11,r5,r3 /* Save it back */
817 bne- protmod /* If it changed, try again... */
819 protul: li r4,0 /* Get a 0 */
820 stw r2,mmPTEr(r10) ; Save the updated mapping PTE
821 lwz r10,mmnext(r10) /* Get the next */
823 sync ; Make sure stores are complete
825 stw r4,0(r7) /* Unlock the hash chain */
826 b protnext /* Go get the next one */
830 protdone: mtmsr r0 /* Interrupts and translation back on */
832 #if PERFTIMES && DEBUG
835 bl EXT(dbgLog2) ; End of hw_prot
842 * hw_prot_virt(mapping, prot) - Change the protection of single page
844 * Upon entry, R3 contains a pointer (real) to a mapping.
845 * R4 contains the PPC protection bits.
847 * Acquire the lock on the PTEG hash list for the mapping being processed.
849 * If the current mapping has a PTE entry, we invalidate
850 * it and merge the reference and change information into the phys_entry.
852 * Next, slam the protection bits into the entry, merge the RC bits,
853 * and unlock the hash list.
855 * Note that this must be done with both interruptions off and VM off
; hw_prot_virt(mapping, prot)
; On entry: r3 = mapping block (real address), r4 = PPC protection bits.
; Takes the mapping's PTEG lock; if a PTE is live, invalidates and tlbie's
; it (tlbsync on non-603), frees its PCA slot, and merges the PTE's RC bits
; into the phys_entry (if one exists) by atomic update.  The new protection
; (and latest RC) is stored into the mapping's PTE copy.  Runs with
; translation and interruptions off.
; NOTE(review): the LEXT entry line, hpvNoMSR label, and the trailing
; return sequence are outside this extract; code is unchanged here.
861 .globl EXT(hw_prot_virt)
864 #if PERFTIMES && DEBUG
870 bl EXT(dbgLog2) ; Start of hw_prot_virt
874 mfsprg r9,2 ; Get feature flags
875 mfmsr r0 /* Save the MSR */
876 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
877 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
878 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
879 mtcrf 0x04,r9 ; Set the features
880 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
882 bt pfNoMSRirb,hpvNoMSR ; No MSR...
884 mtmsr r12 ; Translation and all off
885 isync ; Toss prefetch
891 li r0,loadMSR ; Get the MSR setter SC
892 mr r3,r12 ; Get new MSR
901 * Note that we need to do the interlocked update here because another processor
902 * can be updating the reference and change bits even though the physical entry
903 * is locked. All modifications to the PTE portion of the physical entry must be
904 * done via interlocked update.
907 lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */
908 lwz r5,mmPTEv(r3) /* Get the virtual address */
909 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
911 li r12,1 /* Get the locked value */
; PTEG lock spin: lwarx/stwcx. attempt plus plain-load wait loop.
913 protvLck1: lwarx r11,0,r7 /* Get the PTEG lock */
914 mr. r11,r11 /* Is it locked? */
915 bne- protvLckw1 /* Yeah... */
916 stwcx. r12,0,r7 /* Try to take it */
917 bne- protvLck1 /* Someone else was trying, try again... */
918 b protvSXg1 /* All done... */
922 protvLckw1: mr. r11,r11 /* Check if it's already held */
923 beq+ protvLck1 /* It's clear... */
924 lwz r11,0(r7) /* Get lock word again... */
925 b protvLckw1 /* Wait... */
929 protvSXg1: isync /* Make sure we haven't used anything yet */
931 lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */
932 lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part
934 rlwinm r9,r5,1,0,3 /* Move in the segment */
935 cmplwi cr7,r6,0 ; Any PTE to invalidate?
936 rlwimi r2,r4,0,30,31 ; Move in the new protection bits
937 rlwinm r8,r5,31,2,25 /* Line it up */
939 beq+ cr7,pvnophys /* There's no PTE to invalidate... */
941 xor r8,r8,r6 /* Back hash to virt index */
942 rlwimi r9,r5,22,4,9 /* Move in the API */
943 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
944 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
945 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
946 mfspr r11,pvr /* Find out what kind of machine we are */
947 rlwimi r9,r8,6,10,19 /* Create the virtual address */
948 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
950 stw r5,0(r6) /* Make the PTE invalid */
951 cmplwi cr1,r11,3 /* Is this a 603? */
952 sync /* Make sure the invalid is stored */
; Serialize tlbie system-wide via the TLBIE lock.
954 tlbhangpv: lwarx r11,0,r12 /* Get the TLBIE lock */
955 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
956 mr. r11,r11 /* Is it locked? */
957 lis r5,0x8000 /* Start up a bit mask */
958 li r11,1 /* Get our lock word */
959 bne- tlbhangpv /* It's locked, go wait... */
960 stwcx. r11,0,r12 /* Try to get it */
961 bne- tlbhangpv /* We was beat... */
963 li r11,0 /* Lock clear value */
965 tlbie r9 /* Invalidate it everywhere */
967 beq- cr1,its603pv /* It's a 603, skip the tlbsync... */
969 eieio /* Make sure that the tlbie happens first */
970 tlbsync /* wait for everyone to catch up */
973 its603pv: stw r11,0(r12) /* Clear the lock */
974 srw r5,r5,r8 /* Make a "free slot" mask */
975 sync /* Make sure of it all */
977 lwz r6,4(r6) /* Get the latest reference and change bits */
978 stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */
979 rlwinm r6,r6,0,23,24 /* Extract the RC bits */
980 lwz r9,PCAallo(r7) /* Get the allocation control bits */
981 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
982 lwz r10,mmphysent(r3) ; Get any physical entry
983 or r9,r9,r5 /* Set the slot free */
984 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
985 andc r9,r9,r8 /* Clear the auto and lock bits */
986 mr. r10,r10 ; Is there a physical entry?
987 li r5,pepte1 /* Get displacement to the second word of master pte */
988 stw r9,PCAallo(r7) /* Store the allocation controls */
989 rlwimi r2,r6,0,23,24 ; Stick in RC bits
990 beq- pvnophys ; No physical entry...
; Atomically merge the RC bits into the phys_entry master copy.
992 protvmod: lwarx r11,r5,r10 /* Get the master copy */
993 or r11,r11,r6 /* Merge in latest RC */
994 stwcx. r11,r5,r10 /* Save it back */
995 bne- protvmod /* If it changed, try again... */
997 pvnophys: li r4,0 /* Get a 0 */
998 stw r2,mmPTEr(r3) ; Set the real part of the PTE
1000 sync ; Make sure everything is stored
1002 stw r4,0(r7) /* Unlock the hash chain */
1003 mtmsr r0 ; Restore interrupts and translation
1006 #if PERFTIMES && DEBUG
1016 * hw_attr_virt(mapping, attr) - Change the attributes of single page
1018 * Upon entry, R3 contains a pointer (real) to a mapping.
1019 * R4 contains the WIMG bits.
1021 * Acquire the lock on the PTEG hash list for the mapping being processed.
1023 * If the current mapping has a PTE entry, we invalidate
1024 * it and merge the reference and change information into the phys_entry.
1026 * Next, slam the WIMG bits into the entry, merge the RC bits,
1027 * and unlock the hash list.
1029 * Note that this must be done with both interruptions off and VM off
1035 .globl EXT(hw_attr_virt)
1038 #if PERFTIMES && DEBUG
1044 bl EXT(dbgLog2) ; Performance trace on entry (log tag shared with hw_add_map)
1048 mfsprg r9,2 ; Get feature flags
1049 mfmsr r0 /* Save the MSR */
1050 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1051 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1052 mtcrf 0x04,r9 ; Set the features
1053 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1054 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
1056 bt pfNoMSRirb,havNoMSR ; No MSR...
1058 mtmsr r12 ; Translation and all off
1059 isync ; Toss prefetch
1065 li r0,loadMSR ; Get the MSR setter SC
1066 mr r3,r12 ; Get new MSR
1073 * Note that we need to do the interlocked update here because another processor
1074 * can be updating the reference and change bits even though the physical entry
1075 * is locked. All modifications to the PTE portion of the physical entry must be
1076 * done via interlocked update.
1079 lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */
1080 lwz r5,mmPTEv(r3) /* Get the virtual address */
1081 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
;
; Take the PTEG lock with a standard lwarx/stwcx. spin.
;
1083 li r12,1 /* Get the locked value */
1085 attrvLck1: lwarx r11,0,r7 /* Get the PTEG lock */
1086 mr. r11,r11 /* Is it locked? */
1087 bne- attrvLckw1 /* Yeah... */
1088 stwcx. r12,0,r7 /* Try to take it */
1089 bne- attrvLck1 /* Someone else was trying, try again... */
1090 b attrvSXg1 /* All done... */
; Spin with plain loads (no reservation) until the lock looks free, then retry
1094 attrvLckw1: mr. r11,r11 /* Check if it's already held */
1095 beq+ attrvLck1 /* It's clear... */
1096 lwz r11,0(r7) /* Get lock word again... */
1097 b attrvLckw1 /* Wait... */
1101 attrvSXg1: isync /* Make sure we haven't used anything yet */
1103 lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */
1104 lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part
1106 rlwinm r9,r5,1,0,3 /* Move in the segment */
1107 mr. r6,r6 /* See if there is a PTE here */
1108 rlwimi r2,r4,0,25,28 ; Move in the new attribute (WIMG) bits
1109 rlwinm r8,r5,31,2,25 /* Line it up and check if empty */
1111 beq+ avnophys /* There's no PTE to invalidate... */
;
; There is a live PTE: reconstruct the virtual address from the hash,
; invalidate the PTE, and shoot it out of all TLBs under the global TLBIE lock.
;
1113 xor r8,r8,r6 /* Back hash to virt index */
1114 rlwimi r9,r5,22,4,9 /* Move in the API */
1115 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
1116 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
1117 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
1118 mfspr r11,pvr /* Find out what kind of machine we are */
1119 rlwimi r9,r8,6,10,19 /* Create the virtual address */
1120 rlwinm r11,r11,16,16,31 /* Isolate CPU type */
1121 stw r5,0(r6) /* Make the PTE invalid */
1122 cmplwi cr1,r11,3 /* Is this a 603? */
1123 sync /* Make sure the invalid is stored */
1125 tlbhangav: lwarx r11,0,r12 /* Get the TLBIE lock */
1126 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
1127 mr. r11,r11 /* Is it locked? */
1128 lis r5,0x8000 /* Start up a bit mask */
1129 li r11,1 /* Get our lock word */
1130 bne- tlbhangav /* It's locked, go wait... */
1131 stwcx. r11,0,r12 /* Try to get it */
1132 bne- tlbhangav /* We was beat... */
1134 li r11,0 /* Lock clear value */
1136 tlbie r9 /* Invalidate it everywhere */
1138 beq- cr1,its603av /* It's a 603, skip the tlbsync... */
1140 eieio /* Make sure that the tlbie happens first */
1141 tlbsync /* wait for everyone to catch up */
1144 its603av: stw r11,0(r12) /* Clear the lock */
1145 srw r5,r5,r8 /* Make a "free slot" mask */
1146 sync /* Make sure of it all */
; Harvest the hardware R/C bits from the dead PTE, free the PTEG slot,
; and merge the RC into the master copy in the physical entry (if any).
1148 lwz r6,4(r6) /* Get the latest reference and change bits */
1149 stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */
1150 rlwinm r6,r6,0,23,24 /* Extract the RC bits */
1151 lwz r9,PCAallo(r7) /* Get the allocation control bits */
1152 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
1153 lwz r10,mmphysent(r3) ; Get any physical entry
1154 or r9,r9,r5 /* Set the slot free */
1155 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
1156 andc r9,r9,r8 /* Clear the auto and lock bits */
1157 mr. r10,r10 ; Is there a physical entry?
1158 li r5,pepte1 /* Get displacement to the second word of master pte */
1159 stw r9,PCAallo(r7) /* Store the allocation controls */
1160 rlwimi r2,r6,0,23,24 ; Stick in RC bits
1161 beq- avnophys ; No physical entry...
; Interlocked merge of the latest RC bits into the master pte image
1163 attrvmod: lwarx r11,r5,r10 /* Get the master copy */
1164 or r11,r11,r6 /* Merge in latest RC */
1165 stwcx. r11,r5,r10 /* Save it back */
1166 bne- attrvmod /* If it changed, try again... */
1168 avnophys: li r4,0 /* Get a 0 */
1169 stw r2,mmPTEr(r3) ; Set the real part of the PTE
1171 sync ; Make sure that everything is updated
1173 stw r4,0(r7) /* Unlock the hash chain */
; Flush the data cache for the whole 4K page, 32 bytes (one line) at a time,
; since the cacheability attributes may just have changed.
1175 rlwinm r2,r2,0,0,19 ; Clear back to page boundary
1177 attrflsh: cmplwi r4,(4096-32) ; Are we about to do the last line on page?
1178 dcbst r2,r4 ; Flush cache because we changed attributes
1179 addi r4,r4,32 ; Bump up cache
1180 blt+ attrflsh ; Do the whole page...
; Second pass: invalidate D- and I-cache lines for the page
; NOTE(review): r4 appears to be re-initialized in lines not visible here - confirm against full source
1184 attrimvl: cmplwi r4,(4096-32) ; Are we about to do the last line on page?
1185 dcbi r2,r4 ; Invalidate dcache because we changed attributes
1186 icbi r2,r4 ; Invalidate icache because we changed attributes
1187 addi r4,r4,32 ; Bump up cache
1188 blt+ attrimvl ; Do the whole page...
1191 mtmsr r0 ; Restore interrupts and translation
1194 #if PERFTIMES && DEBUG
1204 * hw_pte_comm(physent) - Do something to the PTE pointing to a physical page
1206 * Upon entry, R3 contains a pointer to a physical entry which is locked.
1207 * Note that this must be done with both interruptions off and VM off
1209 * First, we set up CRs 5 and 7 to indicate which of the 7 calls this is.
1211 * Now we scan the mappings to invalidate any with an active PTE.
1213 * Acquire the lock on the PTEG hash list for the mapping being processed.
1215 * If the current mapping has a PTE entry, we invalidate
1216 * it and merge the reference and change information into the phys_entry.
1218 * Next, unlock the hash list and go on to the next mapping.
;
; hw_inv_all(physent) - invalidate every PTE mapping this physical page.
; R9 carries the call-type flags for hw_pte_comm; R2 carries the set/clear
; RC request (none here).
;
1225 .globl EXT(hw_inv_all)
1229 li r9,0x800 /* Indicate invalidate all */
1230 li r2,0 ; No inadvertent modifications please (set none, clear none)
1231 b hw_pte_comm /* Join in the fun... */
;
; hw_tst_mod(physent) - test the change (modified) bit for a physical page.
; Fast path: if the saved master pte image already shows C set, return it
; without touching the mappings; otherwise fall into hw_pte_comm to collect
; the latest RC from any live PTEs.
;
1235 .globl EXT(hw_tst_mod)
1239 lwz r8,pepte1(r3) ; Get the saved PTE image
1240 li r9,0x400 /* Indicate test modify */
1241 li r2,0 ; No inadvertent modifications please
1242 rlwinm. r8,r8,25,31,31 ; Make change bit into return code
1243 beq+ hw_pte_comm ; Assume we do not know if it is set...
1244 mr r3,r8 ; Set the return code
1245 blr ; Return quickly...
;
; hw_tst_ref(physent) - test the reference bit for a physical page.
; Same fast-path structure as hw_tst_mod, but extracts R instead of C.
;
1248 .globl EXT(hw_tst_ref)
1251 lwz r8,pepte1(r3) ; Get the saved PTE image
1252 li r9,0x200 /* Indicate test reference bit */
1253 li r2,0 ; No inadvertent modifications please
1254 rlwinm. r8,r8,24,31,31 ; Make reference bit into return code
1255 beq+ hw_pte_comm ; Assume we do not know if it is set...
1256 mr r3,r8 ; Set the return code
1257 blr ; Return quickly...
1260 * Note that the following are all in one CR for ease of use later
;
; hw_set_mod(physent) - set the change (modified) bit for a physical page.
; R2 encodes the RC request consumed by hw_pte_comm: high nibble bits select
; "set", low bits select "clear" (see the rlwinm extracts at commulnl).
;
1263 .globl EXT(hw_set_mod)
1267 li r9,0x008 /* Indicate set modify bit */
1268 li r2,0x4 ; Set set C, clear none
1269 b hw_pte_comm /* Join in the fun... */
;
; hw_clr_mod(physent) - clear the change (modified) bit for a physical page
;
1273 .globl EXT(hw_clr_mod)
1277 li r9,0x004 /* Indicate clear modify bit */
1278 li r2,0x1 ; Set set none, clear C
1279 b hw_pte_comm /* Join in the fun... */
;
; hw_set_ref(physent) - set the reference bit for a physical page
;
1283 .globl EXT(hw_set_ref)
1287 li r9,0x002 /* Indicate set reference */
1288 li r2,0x8 ; Set set R, clear none
1289 b hw_pte_comm /* Join in the fun... */
;
; hw_clr_ref(physent) - clear the reference bit for a physical page
;
1292 .globl EXT(hw_clr_ref)
1296 li r9,0x001 /* Indicate clear reference bit */
1297 li r2,0x2 ; Set set none, clear R
1298 b hw_pte_comm /* Join in the fun... */
1302 * This is the common stuff.
;
; hw_pte_comm - shared worker for the seven entry points above.
; In: R3 = locked physical entry, R9 = call-type flags (moved into CR5/CR7),
;     R2 = RC set/clear request. Walks every mapping chained off the physent,
;     invalidates any live PTE (merging its RC bits back), then performs the
;     requested test/set/clear on the master pte image.
;
1307 hw_pte_comm: /* Common routine for pte tests and manips */
1309 #if PERFTIMES && DEBUG
1315 bl EXT(dbgLog2) ; Performance trace on entry (log tag shared with hw_add_map)
1319 mfsprg r8,2 ; Get feature flags
1320 lwz r10,pephyslink(r3) /* Get the first mapping block */
1321 mfmsr r0 /* Save the MSR */
1322 rlwinm. r10,r10,0,0,26 ; Clear out the flags from first link and see if we are mapped
1323 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1324 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1325 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1326 mtcrf 0x04,r8 ; Set the features
1327 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
1328 beq- comnmap ; No mapping
1329 dcbt br0,r10 ; Touch the first mapping in before the isync
1333 bt pfNoMSRirb,hpcNoMSR ; No MSR...
1335 mtmsr r12 ; Translation and all off
1336 isync ; Toss prefetch
1342 li r0,loadMSR ; Get the MSR setter SC
1343 mr r3,r12 ; Get new MSR
1349 mtcrf 0x05,r9 /* Set the call type flags into cr5 and 7 */
1351 beq- commdone ; Nothing is mapped to this page...
1352 b commnext ; Jump to first pass (jump here so we can align loop)
;
; Per-mapping loop: prefetch the next mapping, lock this mapping's PTEG,
; and invalidate its PTE if one is present.
;
1356 commnext: lwz r11,mmnext(r10) ; Get the pointer to the next mapping (if any)
1357 lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */
1358 lwz r5,mmPTEv(r10) /* Get the virtual address */
1359 mr. r11,r11 ; More mappings to go?
1360 rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */
1361 beq- commnxtch ; No more mappings...
1362 dcbt br0,r11 ; Touch the next mapping
1364 commnxtch: li r12,1 /* Get the locked value */
1366 commLck1: lwarx r11,0,r7 /* Get the PTEG lock */
1367 mr. r11,r11 /* Is it locked? */
1368 bne- commLckw1 /* Yeah... */
1369 stwcx. r12,0,r7 /* Try to take it */
1370 bne- commLck1 /* Someone else was trying, try again... */
1371 b commSXg1 /* All done... */
; Spin with plain loads until the PTEG lock looks free, then retry
1375 commLckw1: mr. r11,r11 /* Check if it's already held */
1376 beq+ commLck1 /* It's clear... */
1377 lwz r11,0(r7) /* Get lock word again... */
1378 b commLckw1 /* Wait... */
1382 commSXg1: isync /* Make sure we haven't used anything yet */
1384 lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */
1386 rlwinm r9,r5,1,0,3 /* Move in the segment */
1387 mr. r6,r6 /* See if there is a PTE entry here */
1388 rlwinm r8,r5,31,2,25 /* Line it up and check if empty */
1390 beq+ commul /* There's no PTE to invalidate... */
; Live PTE: rebuild the virtual address, invalidate, and TLB-shootdown
; under the global TLBIE lock (603 has no tlbsync, hence the CPU check).
1392 xor r8,r8,r6 /* Back hash to virt index */
1393 rlwimi r9,r5,22,4,9 /* Move in the API */
1394 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
1395 rlwinm r5,r5,0,1,31 /* Clear the valid bit */
1396 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
1397 rlwimi r9,r8,6,10,19 /* Create the virtual address */
1399 stw r5,0(r6) /* Make the PTE invalid */
1400 mfspr r4,pvr /* Find out what kind of machine we are */
1401 sync /* Make sure the invalid is stored */
1403 tlbhangco: lwarx r11,0,r12 /* Get the TLBIE lock */
1404 rlwinm r8,r6,29,29,31 /* Get the bit position of entry */
1405 mr. r11,r11 /* Is it locked? */
1406 lis r5,0x8000 /* Start up a bit mask */
1407 li r11,1 /* Get our lock word */
1408 bne- tlbhangco /* It's locked, go wait... */
1409 stwcx. r11,0,r12 /* Try to get it */
1410 bne- tlbhangco /* We was beat... */
1412 rlwinm r4,r4,16,16,31 /* Isolate CPU type */
1413 li r11,0 /* Lock clear value */
1414 cmplwi r4,3 /* Is this a 603? */
1416 tlbie r9 /* Invalidate it everywhere */
1418 beq- its603co /* It's a 603, skip the tlbsync... */
1420 eieio /* Make sure that the tlbie happens first */
1421 tlbsync /* wait for everyone to catch up */
1424 its603co: stw r11,0(r12) /* Clear the lock */
1425 srw r5,r5,r8 /* Make a "free slot" mask */
1426 sync /* Make sure of it all */
; Harvest RC bits from the dead PTE and free the PTEG slot
1428 lwz r6,4(r6) /* Get the latest reference and change bits */
1429 lwz r9,PCAallo(r7) /* Get the allocation control bits */
1430 stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */
1431 rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */
1432 or r9,r9,r5 /* Set the slot free */
1433 rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */
1434 rlwinm r4,r6,0,23,24 /* Extract the RC bits */
1435 andc r9,r9,r8 /* Clear the auto and lock bits */
1436 li r5,pepte1 /* Get displacement to the second word of master pte */
1437 stw r9,PCAallo(r7) /* Store the allocation controls */
; Interlocked merge of the harvested RC into the master pte image
1439 commmod: lwarx r11,r5,r3 /* Get the master copy */
1440 or r11,r11,r4 /* Merge in latest RC */
1441 stwcx. r11,r5,r3 /* Save it back */
1442 bne- commmod /* If it changed, try again... */
1443 b commulnl ; Skip loading the old real part...
1445 commul: lwz r6,mmPTEr(r10) ; Get the real part
; Apply the caller's set/clear request (from R2) to the mapping's real part
1447 commulnl: rlwinm r12,r2,5,23,24 ; Get the "set" bits
1448 rlwinm r11,r2,7,23,24 ; Get the "clear" bits
1450 or r6,r6,r12 ; Set the bits to come on
1451 andc r6,r6,r11 ; Clear those to come off
1453 stw r6,mmPTEr(r10) ; Set the new RC
1455 lwz r10,mmnext(r10) /* Get the next */
1456 li r4,0 /* Make sure this is 0 */
1457 mr. r10,r10 ; Is there another mapping?
1459 sync ; Make sure that all is saved
1461 stw r4,0(r7) /* Unlock the hash chain */
1462 bne+ commnext ; Go get the next if there is one...
1465 * Now that all PTEs have been invalidated and the master RC bits are updated,
1466 * we go ahead and figure out what the original call was and do that. Note that
1467 * another processor could be messing around and may have entered one of the
1468 * PTEs we just removed into the hash table. Too bad... You takes yer chances.
1469 * If there's a problem with that, it's because some higher level was trying to
1470 * do something with a mapping that it shouldn't. So, the problem's really
1471 * there, nyaaa, nyaaa, nyaaa... nyaaa, nyaaa... nyaaa! So there!
; Dispatch on the call type recorded in CR5 (set way back at entry)
1474 commdone: li r5,pepte1 /* Get displacement to the second word of master pte */
1475 blt cr5,commfini /* We're finished, it was invalidate all... */
1476 bgt cr5,commtst /* It was a test modified... */
1477 beq cr5,commtst /* It was a test reference... */
1480 * Note that we need to do the interlocked update here because another processor
1481 * can be updating the reference and change bits even though the physical entry
1482 * is locked. All modifications to the PTE portion of the physical entry must be
1483 * done via interlocked update.
1486 rlwinm r12,r2,5,23,24 ; Get the "set" bits
1487 rlwinm r11,r2,7,23,24 ; Get the "clear" bits
1489 commcng: lwarx r8,r5,r3 /* Get the master copy */
1490 or r8,r8,r12 ; Set the bits to come on
1491 andc r8,r8,r11 ; Clear those to come off
1492 stwcx. r8,r5,r3 /* Save it back */
1493 bne- commcng /* If it changed, try again... */
1495 mtmsr r0 /* Interrupts and translation back on */
1497 #if PERFTIMES && DEBUG
1501 bl EXT(dbgLog2) ; Performance trace on exit
; Test paths: extract R or C from the master pte image into R3 (0 or 1)
1509 commtst: lwz r8,pepte1(r3) /* Get the PTE */
1510 bne- cr5,commtcb ; This is for the change bit...
1511 mtmsr r0 ; Interrupts and translation back on
1512 rlwinm r3,r8,24,31,31 ; Copy reference bit to bit 31
1513 isync ; Toss prefetching
1514 #if PERFTIMES && DEBUG
1518 bl EXT(dbgLog2) ; Performance trace on exit
1526 commtcb: rlwinm r3,r8,25,31,31 ; Copy change bit to bit 31
1528 commfini: mtmsr r0 ; Interrupts and translation back on
1529 isync ; Toss prefetching
1531 #if PERFTIMES && DEBUG
1535 bl EXT(dbgLog2) ; Performance trace on exit
1542 * unsigned int hw_test_rc(mapping *mp, boolean_t reset);
1544 * Test the RC bits for a specific mapping. If reset is non-zero, clear them.
1545 * We return the RC value in the mapping if there is no PTE or if C is set.
1546 * (Note: R is always set with C.) Otherwise we invalidate the PTE and
1547 * collect the RC bits from there, also merging them into the global copy.
1549 * For now, we release the PTE slot and leave it invalid. In the future, we
1550 * may consider re-validating and not releasing the slot. It would be faster,
1551 * but our current implementation says that we will have no PTEs valid
1552 * without the reference bit set.
1554 * We will special case C==1 && not reset to just return the RC.
1556 * Probable state is worst performance state: C bit is off and there is a PTE.
1562 .globl EXT(hw_test_rc)
1566 mfsprg r9,2 ; Get feature flags
1567 mfmsr r0 ; Save the MSR
1568 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1569 mr. r4,r4 ; See if we have a reset to do later
1570 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1571 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruption mask
1572 crnot htrReset,cr0_eq ; Remember reset
1573 mtcrf 0x04,r9 ; Set the features
1574 rlwinm r12,r12,0,28,25 ; Clear IR and DR
1576 bt pfNoMSRirb,htrNoMSR ; No MSR...
1578 mtmsr r12 ; Translation and all off
1579 isync ; Toss prefetch
1585 li r0,loadMSR ; Get the MSR setter SC
1586 mr r3,r12 ; Get new MSR
; Fast path: if C is already set in the mapping copy and no reset was
; requested, we can return the RC without taking any locks.
1592 lwz r2,mmPTEr(r3) ; Get the real part
1593 lwz r7,mmPTEhash(r3) ; Get pointer to hash list anchor
1594 rlwinm. r12,r2,0,24,24 ; Is the change bit on?
1595 lwz r5,mmPTEv(r3) ; Get the virtual address
1596 crnor cr0_eq,cr0_eq,htrReset ; Set if C=1 && not reset
1597 rlwinm r7,r7,0,0,25 ; Round hash list down to PCA boundary
1598 bt cr0_eq,htrcset ; Special case changed but no reset case...
; Take the PTEG lock with the standard lwarx/stwcx. spin
1600 li r12,1 ; Get the locked value
1602 htrLck1: lwarx r11,0,r7 ; Get the PTEG lock
1603 mr. r11,r11 ; Is it locked?
1604 bne- htrLckw1 ; Yeah...
1605 stwcx. r12,0,r7 ; Try to take it
1606 bne- htrLck1 ; Someone else was trying, try again...
1607 b htrSXg1 ; All done...
1611 htrLckw1: mr. r11,r11 ; Check if it is already held
1612 beq+ htrLck1 ; It is clear...
1613 lwz r11,0(r7) ; Get lock word again...
1614 b htrLckw1 ; Wait...
1618 htrSXg1: isync ; Make sure we have not used anything yet
1620 lwz r6,mmPTEent(r3) ; Get the pointer to the PTE now that the lock is set
1621 lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part
1623 rlwinm r9,r5,1,0,3 ; Move in the segment
1624 mr. r6,r6 ; Any PTE to invalidate?
1625 rlwinm r8,r5,31,2,25 ; Line it up
1627 beq+ htrnopte ; There is no PTE to invalidate...
; Live PTE: rebuild the virtual address, invalidate it, and shoot it out of
; all TLBs under the global TLBIE lock (603 skips the tlbsync).
1629 xor r8,r8,r6 ; Back hash to virt index
1630 rlwimi r9,r5,22,4,9 ; Move in the API
1631 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock
1632 rlwinm r5,r5,0,1,31 ; Clear the valid bit
1633 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part
1634 mfspr r11,pvr ; Find out what kind of machine we are
1635 rlwimi r9,r8,6,10,19 ; Create the virtual address
1636 rlwinm r11,r11,16,16,31 ; Isolate CPU type
1638 stw r5,0(r6) ; Make the PTE invalid
1639 cmplwi cr1,r11,3 ; Is this a 603?
1640 sync ; Make sure the invalid is stored
1642 htrtlbhang: lwarx r11,0,r12 ; Get the TLBIE lock
1643 rlwinm r8,r6,29,29,31 ; Get the bit position of entry
1644 mr. r11,r11 ; Is it locked?
1645 lis r5,0x8000 ; Start up a bit mask
1646 li r11,1 ; Get our lock word
1647 bne- htrtlbhang ; It is locked, go wait...
1648 stwcx. r11,0,r12 ; Try to get it
1649 bne- htrtlbhang ; We was beat...
1651 li r11,0 ; Lock clear value
1653 tlbie r9 ; Invalidate it everywhere
1655 beq- cr1,htr603 ; It is a 603, skip the tlbsync...
1657 eieio ; Make sure that the tlbie happens first
1658 tlbsync ; wait for everyone to catch up
1661 htr603: stw r11,0(r12) ; Clear the lock
1662 srw r5,r5,r8 ; Make a "free slot" mask
1663 sync ; Make sure of it all
; Harvest RC from the dead PTE, free the PTEG slot, and merge the RC into
; the master copy in the physical entry (if there is one).
1665 lwz r6,4(r6) ; Get the latest reference and change bits
1666 stw r11,mmPTEent(r3) ; Clear the pointer to the PTE
1667 rlwinm r6,r6,0,23,24 ; Extract the RC bits
1668 lwz r9,PCAallo(r7) ; Get the allocation control bits
1669 rlwinm r8,r5,24,8,15 ; Make the autogen bit to turn off
1670 lwz r10,mmphysent(r3) ; Get any physical entry
1671 or r9,r9,r5 ; Set the slot free
1672 rlwimi r8,r8,24,16,23 ; Get lock bit mask to turn it off
1673 andc r9,r9,r8 ; Clear the auto and lock bits
1674 mr. r10,r10 ; Is there a physical entry?
1675 li r5,pepte1 ; Get displacement to the second word of master pte
1676 stw r9,PCAallo(r7) ; Store the allocation controls
1677 rlwimi r2,r6,0,23,24 ; Stick in RC bits
1678 beq- htrnopte ; No physical entry...
1680 htrmrc: lwarx r11,r5,r10 ; Get the master copy
1681 or r11,r11,r6 ; Merge in latest RC
1682 stwcx. r11,r5,r10 ; Save it back
1683 bne- htrmrc ; If it changed, try again...
; Extract the RC return value and optionally clear the RC in the mapping
1685 htrnopte: rlwinm r5,r2,25,30,31 ; Position RC and mask off
1686 bf htrReset,htrnorst ; No reset to do...
1687 rlwinm r2,r2,0,25,22 ; Clear the RC if requested
1689 htrnorst: li r4,0 ; Get a 0
1690 stw r2,mmPTEr(r3) ; Set the real part of the PTE
1692 sync ; Make sure that stuff is all stored
1694 stw r4,0(r7) ; Unlock the hash chain
1696 mr r3,r5 ; Get the old RC to pass back
1697 mtmsr r0 ; Restore interrupts and translation
; Fast-path exit: C was already set and no reset requested
1703 htrcset: rlwinm r3,r2,25,30,31 ; Position RC and mask off
1704 mtmsr r0 ; Restore interrupts and translation
1710 * hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - Sets the default physical page attributes
1712 * Note that this must be done with both interruptions off and VM off
1713 * Move the passed in attributes into the pte image in the phys entry
1719 .globl EXT(hw_phys_attr)
1723 #if PERFTIMES && DEBUG
1731 bl EXT(dbgLog2) ; Performance trace on entry (log tag shared with hw_add_map)
1737 mfsprg r9,2 ; Get feature flags
1738 mfmsr r0 /* Save the MSR */
1739 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1740 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1741 andi. r5,r5,0x0078 /* Clean up the WIMG */
1742 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
1743 mtcrf 0x04,r9 ; Set the features
1744 rlwimi r5,r4,0,30,31 /* Move the protection into the wimg register */
1745 la r6,pepte1(r3) /* Point to the default pte */
1746 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
1748 bt pfNoMSRirb,hpaNoMSR ; No MSR...
1750 mtmsr r12 ; Translation and all off
1751 isync ; Toss prefetch
1757 li r0,loadMSR ; Get the MSR setter SC
1758 mr r3,r12 ; Get new MSR
; Interlocked update: merge the combined WIMG+protection bits into the
; master pte image without losing concurrent RC updates.
1764 atmattr: lwarx r10,0,r6 /* Get the pte */
1765 rlwimi r10,r5,0,25,31 /* Move in the new attributes */
1766 stwcx. r10,0,r6 /* Try it on for size */
1767 bne- atmattr /* Someone else was trying, try again... */
1769 mtmsr r0 /* Interrupts and translation back on */
1771 #if PERFTIMES && DEBUG
1775 bl EXT(dbgLog2) ; Performance trace on exit
1778 blr /* All done... */
1783 * handlePF - handle a page fault interruption
1785 * If the fault can be handled, this routine will RFI directly,
1786 * otherwise it will return with all registers as in entry.
1788 * Upon entry, state and all registers have been saved in savearea.
1789 * This is pointed to by R13.
1790 * IR and DR are off, interrupts are masked,
1791 * Floating point be disabled.
1792 * R3 is the interrupt code.
1794 * If we bail, we must restore cr5, and all registers except 6 and
1800 .globl EXT(handlePF)
1805 * This first part does a quick check to see if we can handle the fault.
1806 * We can't handle any kind of protection exceptions here, so we pass
1807 * them up to the next level.
1809 * The mapping lists are kept in MRS (most recently stolen)
1810 * order on queues anchored within from the
1811 * PTEG to which the virtual address hashes. This is further segregated by
1812 * the low-order 3 bits of the VSID XORed with the segment number and XORed
1813 * with bits 4-7 of the vaddr in an attempt to keep the searches
1816 * MRS is handled by moving the entry to the head of its list when stolen in the
1817 * assumption that it will be revalidated soon. Entries are created on the head
1818 * of the list because they will be used again almost immediately.
1820 * We need R13 set to the savearea, R3 set to the interrupt code, and R2
1821 * set to the per_proc.
1823 * NOTE: In order for a page-fault redrive to work, the translation miss
1824 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
1825 * before we come here.
1828 cmplwi r3,T_INSTRUCTION_ACCESS /* See if this is for the instruction */
1829 lwz r8,savesrr1(r13) ; Get the MSR to determine mode
1830 beq- gotIfetch ; We have an IFETCH here...
1832 lwz r7,savedsisr(r13) /* Get the DSISR */
1833 lwz r6,savedar(r13) /* Get the fault address */
1834 b ckIfProt ; Go check if this is a protection fault...
1836 gotIfetch: mr r7,r8 ; IFETCH info is in SRR1
1837 lwz r6,savesrr0(r13) /* Get the instruction address */
1839 ckIfProt: rlwinm. r7,r7,0,1,1 ; Is this a protection exception?
1840 beqlr- ; Yes... (probably not though)
1843 * We will need to restore registers if we bail after this point.
1844 * Note that at this point several SRs have been changed to the kernel versions.
1845 * Therefore, for these we must build these values.
1848 #if PERFTIMES && DEBUG
1853 bl EXT(dbgLog2) ; Start of hw_add_map
1858 lwz r3,PP_USERPMAP(r2) ; Get the user pmap (not needed if kernel access, but optimize for user??)
1859 rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Supervisor state access?
1860 rlwinm r5,r6,6,26,29 ; Get index to the segment slot
1861 eqv r1,r1,r1 ; Fill the bottom with foxes
1862 bne+ notsuper ; Go do the user mode interrupt stuff...
1864 cmplwi cr1,r5,SR_COPYIN_NUM*4 ; See if this is the copyin/copyout segment
1865 rlwinm r3,r6,24,8,11 ; Make the kernel VSID
1866 bne+ cr1,havevsid ; We are done if we do not want the copyin/out guy...
1868 mfsr r3,SR_COPYIN ; Get the copy vsid
1869 b havevsid ; Join up...
1873 notsuper: addi r5,r5,PMAP_SEGS ; Get offset to table
1874 lwzx r3,r3,r5 ; Get the VSID
1876 havevsid: mfspr r5,sdr1 /* Get hash table base and size */
1877 cror cr1_eq,cr0_eq,cr0_eq ; Remember if kernel fault for later
1878 rlwinm r9,r6,2,2,5 ; Move nybble 1 up to 0 (keep aligned with VSID)
1879 rlwimi r1,r5,16,0,15 /* Make table size -1 out of mask */
1880 rlwinm r3,r3,6,2,25 /* Position the space for the VSID */
1881 rlwinm r7,r6,26,10,25 /* Isolate the page index */
1882 xor r9,r9,r3 ; Splooch vaddr nybble 0 (from VSID) and 1 together
1883 or r8,r5,r1 /* Point to the last byte in table */
1884 xor r7,r7,r3 /* Get primary hash */
1885 rlwinm r3,r3,1,1,24 /* Position VSID for pte ID */
1886 addi r8,r8,1 /* Point to the PTEG Control Area */
1887 rlwinm r9,r9,8,27,29 ; Get splooched bits in place
1888 and r7,r7,r1 /* Wrap the hash */
1889 rlwimi r3,r6,10,26,31 /* Move API into pte ID */
1890 add r8,r8,r7 /* Point to our PCA entry */
1891 rlwinm r12,r3,27,27,29 ; Get low 3 bits of the VSID for look-aside hash
1892 la r11,PCAhash(r8) /* Point to the mapping hash area */
1893 xor r9,r9,r12 ; Finish splooching nybble 0, 1, and the low bits of the VSID
1897 * We have about as much as we need to start searching the autogen (aka block maps)
1898 * and mappings. From here on, any kind of failure will bail, and
1899 * contention will either bail or restart from here.
1904 li r12,1 /* Get the locked value */
1905 dcbt 0,r11 /* We'll need the hash area in a sec, so get it */
1906 add r11,r11,r9 /* Point to the right mapping hash slot */
1908 ptegLck: lwarx r10,0,r8 /* Get the PTEG lock */
1909 mr. r10,r10 /* Is it locked? */
1910 bne- ptegLckw /* Yeah... */
1911 stwcx. r12,0,r8 /* Take take it */
1912 bne- ptegLck /* Someone else was trying, try again... */
1913 b ptegSXg /* All done... */
1917 ptegLckw: mr. r10,r10 /* Check if it's already held */
1918 beq+ ptegLck /* It's clear... */
1919 lwz r10,0(r8) /* Get lock word again... */
1920 b ptegLckw /* Wait... */
1924 nop ; Force ISYNC to last instruction in IFETCH
1928 ptegSXg: isync /* Make sure we haven't used anything yet */
1930 lwz r9,0(r11) /* Pick up first mapping block */
1931 mr r5,r11 /* Get the address of the anchor */
1932 mr r7,r9 /* Save the first in line */
1933 b findmap ; Take space and force loop to cache line
1935 findmap: mr. r12,r9 /* Are there more? */
1936 beq- tryAuto /* Nope, nothing in mapping list for us... */
1938 lwz r10,mmPTEv(r12) /* Get unique PTE identification */
1939 lwz r9,mmhashnext(r12) /* Get the chain, just in case */
1940 cmplw r10,r3 /* Did we hit our PTE? */
1941 lwz r0,mmPTEent(r12) /* Get the pointer to the hash table entry */
1942 mr r5,r12 /* Save the current as previous */
1943 bne- findmap ; Nothing here, try the next...
1945 ; Cache line boundary here
1947 cmplwi cr1,r0,0 /* Is there actually a PTE entry in the hash? */
1948 lwz r2,mmphysent(r12) /* Get the physical entry */
1949 bne- cr1,MustBeOK /* There's an entry in the hash table, so, this must
1950 have been taken care of already... */
1951 lis r4,0x8000 ; Tell PTE inserter that this was not an auto
1952 cmplwi cr2,r2,0 /* Is there a physical entry? */
1953 li r0,0x0100 /* Force on the reference bit whenever we make a PTE valid */
1954 bne+ cr2,gotphys /* Skip down if we have a physical entry */
1955 li r0,0x0180 /* When there is no physical entry, force on
1956 both R and C bits to keep hardware from
1957 updating the PTE to set them. We don't
1958 keep track of RC for I/O areas, so this is ok */
1960 gotphys: lwz r2,mmPTEr(r12) ; Get the second part of the PTE
1961 b insert /* Go insert into the PTEG... */
1963 MustBeOK: li r10,0 /* Get lock clear value */
1964 li r3,T_IN_VAIN /* Say that we handled it */
1965 stw r10,PCAlock(r8) /* Clear the PTEG lock */
1967 #if PERFTIMES && DEBUG
1971 bl EXT(dbgLog2) ; Start of hw_add_map
1975 blr /* Blow back and handle exception */
1980 * We couldn't find it in the mapping list. As a last try, we will
1981 * see if we can autogen it from the block mapped list.
1983 * A block mapped area is defined as a contiguous virtual area that is mapped to
1984 * a contiguous physical area. The olde-tyme IBM VM/XA Interpretive Execution
1985 * architecture referred to this as a V=F, or Virtual = Fixed area.
1987 * We consider a V=F area to be a single entity, adjacent areas can not be merged
1988 * or overlapped. The protection and memory attributes are the same and reference
1989 * and change indications are not kept. The areas are not considered part of the
1990 * physical RAM of the machine and do not have any associated physical table
1991 * entries. Their primary use is intended for mapped I/O areas (e.g., framebuffers)
1992 * although certain areas of RAM, such as the kernel V=R memory, can be mapped.
1994 * We also have a problem in the case of copyin/out: that access is done
1995 * within the kernel for a user address. Unfortunately, the user isn't
1996 * necessarily the current guy. That means that we don't have access to the
1997 * right autogen list. We can't support this kind of access. So, we need to do
1998 * a quick check here and cause a fault if an attempt to copyin or out to
1999 * any autogenned area.
2001 * The lists must be kept short.
2003 * NOTE: kernel_pmap_store must be in V=R storage!!!!!!!!!!!!!!
; tryAuto: the mapping was not found on the hash list, so try to autogen a
; PTE from the block-mapped (V=F) list.  A kernel access with a non-zero
; VSID is a copyin/copyout for another address space, which cannot be
; autogenned, so that case takes the real fault.  Otherwise r10 is pointed
; at the proper bmap chain anchor (kernel pmap's or the user pmap's).
2008 tryAuto: rlwinm. r11,r3,0,5,24 ; Check if this is a kernel VSID
2009 lis r10,HIGH_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the top part of kernel block map anchor
2010 crandc cr0_eq,cr1_eq,cr0_eq ; Set if kernel access and non-zero VSID (copyin or copyout)
2011 mfsprg r11,0 ; Get the per_proc area
2012 beq- cr0,realFault ; Can not autogen for copyin/copyout...
2013 ori r10,r10,LOW_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the bottom part
2014 beq- cr1,bmInKernel ; We are in kernel... (cr1 set way back at entry)
2016 lwz r10,PP_USERPMAP(r11) ; Get the user pmap
2017 la r10,PMAP_BMAPS(r10) ; Point to the chain anchor
2018 b bmInKernel ; Jump over alignment gap...
2026 #ifndef CHIP_ERRATA_MAX_V1
2028 #endif /* CHIP_ERRATA_MAX_V1 */
;
; bmapLck: acquire the block-map list lock (low bit of the anchor word at
; 0(r10)) with lwarx/stwcx.; bmapLckw spins on plain loads while the lock is
; held so we do not hammer the reservation.  r9 keeps the pre-lock anchor
; value for the eventual unlock.
; NOTE(review): the value stored while locked is derived from bit 31 only
; (r5), unlike abLck below which stores r9|1 - the chain pointer lives in r9
; until the unlock store restores it.  Confirm this asymmetry is intended.
;
2030 bmapLck: lwarx r9,0,r10 ; Get the block map anchor and lock
2031 rlwinm. r5,r9,0,31,31 ; Is it locked?
2032 ori r5,r5,1 ; Set the lock
2033 bne- bmapLckw ; Yeah...
2034 stwcx. r5,0,r10 ; Lock the bmap list
2035 bne- bmapLck ; Someone else was trying, try again...
2036 b bmapSXg ; All done...
2040 bmapLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
2041 beq+ bmapLck ; Not no more...
2042 lwz r9,0(r10) ; Get lock word again...
2043 b bmapLckw ; Check it out...
2047 nop ; Force ISYNC to last instruction in IFETCH
;
; bmapSXg: lock obtained - isync before touching the list.  r4 gets the
; chain head with flags/lock stripped; if the chain is empty there is
; nothing to autogen from, so restore the anchor word (which also unlocks)
; and fall into the real-fault path.
;
2051 bmapSXg: rlwinm. r4,r9,0,0,26 ; Clear out flags and lock
2052 isync ; Make sure we have not used anything yet
2053 bne+ findAuto ; We have something, let us go...
2055 bmapNone: stw r9,0(r10) ; Unlock it, we have nothing here
2056 ; No sync here because we have not changed anything
2059 * When we come here, we know that we can't handle this. Restore whatever
2060 * state that we trashed and go back to continue handling the interrupt.
2063 realFault: li r10,0 /* Get lock clear value */
2064 lwz r3,saveexception(r13) /* Figure out the exception code again */
2065 stw r10,PCAlock(r8) /* Clear the PTEG lock */
2066 #if PERFTIMES && DEBUG
2070 bl EXT(dbgLog2) ; Record debug performance trace
2074 blr /* Blow back and handle exception */
; findAuto: walk the block-map chain (r4) looking for an entry whose
; [bmstart, bmend] range covers the faulting address in r6.  Falls back to
; bmapNone (unlock + real fault) when the chain is exhausted.
2078 findAuto: mr. r4,r4 ; Is there more?
2079 beq- bmapNone ; No more...
2080 lwz r5,bmstart(r4) ; Get the bottom of range
2081 lwz r11,bmend(r4) ; Get the top of range
2082 cmplw cr0,r6,r5 ; Are we before the entry?
2083 cmplw cr1,r6,r11 ; Are we after the entry?
2084 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
2085 bne+ cr1,faGot ; Found it...
2087 lwz r4,bmnext(r4) ; Get the next one
2088 b findAuto ; Check it out...
; (faGot: - label line elided in this view)  Found a covering block map.
; Bail out if it is partially removed; otherwise build the real half of an
; autogen PTE: the block's PTE real part plus the page offset into the area.
; r4 = 0x8080.. marks the slot as autogenned; r0 preloads the R and C bits
; so the hardware never has to update them for this entry.
2091 lwz r7,blkFlags(r4) ; Get the flags
2092 rlwinm. r7,r7,0,blkRembit,blkRembit ; is this mapping partially removed
2093 bne bmapNone ; Pending remove, bail out
2094 rlwinm r6,r6,0,0,19 ; Round to page
2095 lwz r2,bmPTEr(r4) ; Get the real part of the PTE
2096 sub r5,r6,r5 ; Get offset into area
2097 stw r9,0(r10) ; Unlock it, we are done with it (no sync needed)
2098 add r2,r2,r5 ; Adjust the real address
2100 lis r4,0x8080 /* Indicate that this was autogened */
2101 li r0,0x0180 /* Autogenned areas always set RC bits.
2102 This keeps the hardware from having
2103 to do two storage writes */
2106 * Here where we insert the PTE into the hash. The PTE image is in R3, R2.
2107 * The PTEG allocation controls are a bit map of the state of the PTEG. The
2108 * PCAlock bits are a temporary lock for the specified PTE. PCAfree indicates that
2109 * the PTE slot is empty. PCAauto means that it comes from an autogen area. These
2110 * guys do not keep track of reference and change and are actually "wired".
2111 * They're easy to maintain. PCAsteal
2112 * is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
2113 * fields fit in a single word and are loaded and stored under control of the
2114 * PTEG control area lock (PCAlock).
2116 * Note that PCAauto does not contribute to the steal calculations at all. Originally
2117 * it did, autogens were second in priority. This can result in a pathological
2118 * case where an instruction can not make forward progress, or one PTE slot
2121 * Physically, the fields are arranged:
; insert: allocate a slot in the PTEG for the new PTE (image in R3/R2),
; under the PCA lock.  Builds a priority-ordered availability mask in r9
; (unlocked-free first, then steal candidates gated by the sliding PCAsteal
; mask; autogens excluded from stealing) and picks the leftmost candidate
; with cntlzw.  Also rotates the steal mask for next time and updates the
; PCA allocation word.  No slot at all => realFault.
2128 insert: lwz r10,PCAallo(r8) /* Get the PTEG controls */
2129 eqv r6,r6,r6 /* Get all ones */
2130 mr r11,r10 /* Make a copy */
2131 rlwimi r6,r10,8,16,23 /* Insert sliding steal position */
2132 rlwimi r11,r11,24,24,31 /* Duplicate the locked field */
2133 addi r6,r6,-256 /* Form mask */
2134 rlwimi r11,r11,16,0,15 /* This gives us a quadrupled lock mask */
2135 rlwinm r5,r10,31,24,0 /* Slide over the mask for next time */
2136 mr r9,r10 /* Make a copy to test */
2137 not r11,r11 /* Invert the quadrupled lock */
2138 or r2,r2,r0 /* Force on R, and maybe C bit */
2139 and r9,r9,r11 /* Remove the locked guys */
2140 rlwimi r5,r5,8,24,24 /* Wrap bottom bit to top in mask */
2141 rlwimi r9,r11,0,16,31 /* Put two copies of the unlocked entries at the end */
2142 rlwinm r6,r6,0,16,7 ; Remove the autogens from the priority calculations
2143 rlwimi r10,r5,0,24,31 /* Move steal map back in */
2144 and r9,r9,r6 /* Set the starting point for stealing */
2146 /* So, now we have in R9:
2147 byte 0 = ~locked & free
2149 byte 2 = ~locked & (PCAsteal - 1)
2152 Each bit position represents (modulo 8) a PTE. If it is 1, it is available for
2153 allocation at its priority level, left to right.
2155 Additionally, the PCA steal field in R10 has been rotated right one bit.
2159 rlwinm r21,r10,8,0,7 ; Isolate just the old autogen bits
2160 cntlzw r6,r9 /* Allocate a slot */
2161 mr r14,r12 /* Save our mapping for later */
2162 cmplwi r6,32 ; Was there anything available?
2163 rlwinm r7,r6,29,30,31 /* Get the priority slot we got this from */
2164 rlwinm r6,r6,0,29,31 ; Isolate bit position
2165 srw r11,r4,r6 /* Position the PTEG control bits */
2166 slw r21,r21,r6 ; Move corresponding old autogen flag to bit 0
2167 mr r22,r11 ; Get another copy of the selected slot
2169 beq- realFault /* Arghh, no slots! Take the long way 'round... */
2171 /* Remember, we've already set up the mask pattern
2172 depending upon how we got here:
2173 if got here from simple mapping, R4=0x80000000,
2174 if we got here from autogen it is 0x80800000. */
2176 rlwinm r6,r6,3,26,28 /* Start calculating actual PTE address */
2177 rlwimi r22,r22,24,8,15 ; Duplicate selected slot in second byte
2178 rlwinm. r11,r11,0,8,15 /* Isolate just the auto bit (remember about it too) */
2179 andc r10,r10,r22 /* Turn off the free and auto bits */
2180 add r6,r8,r6 /* Get position into PTEG control area */
2181 cmplwi cr1,r7,1 /* Set the condition based upon the old PTE type */
2182 sub r6,r6,r1 /* Switch it to the hash table */
2183 or r10,r10,r11 /* Turn auto on if it is (PTEG control all set up now) */
2184 subi r6,r6,1 /* Point right */
2185 stw r10,PCAallo(r8) /* Allocate our slot */
2186 dcbt br0,r6 ; Touch in the PTE
2187 bne wasauto /* This was autogenned... */
2189 stw r6,mmPTEent(r14) /* Link the mapping to the PTE slot */
2192 * So, now we're here and what exactly do we have? We've got:
2193 * 1) a full PTE entry, both top and bottom words in R3 and R2
2194 * 2) an allocated slot in the PTEG.
2195 * 3) R8 still points to the PTEG Control Area (PCA)
2196 * 4) R6 points to the PTE entry.
2197 * 5) R1 contains length of the hash table-1. We use this to back-translate
2198 * a PTE to a virtual address so we can invalidate TLBs.
2199 * 6) R11 has a copy of the PCA controls we set.
2200 * 7a) R7 indicates what the PTE slot was before we got to it. 0 shows
2201 * that it was empty and 2 or 3, that
2202 * we've stolen a live one. CR1 is set to LT for empty and GT
2204 * 7b) Bit 0 of R21 is 1 if the stolen PTE was autogenned
2205 * 8) So far as our selected PTE, it should be valid if it was stolen
2206 * and invalid if not. We could put some kind of assert here to
2207 * check, but I think that I'd rather leave it in as a mysterious,
2208 * non-reproducible bug.
2209 * 9) The new PTE's mapping has been moved to the front of its PTEG hash list
2210 * so that it's kept in some semblance of a MRU list.
2211 * 10) R14 points to the mapping we're adding.
2213 * So, what do we have to do yet?
2214 * 1) If we stole a slot, we need to invalidate the PTE completely.
2215 * 2) If we stole one AND it was not an autogen,
2216 * copy the entire old PTE (including R and C bits) to its mapping.
2217 * 3) Set the new PTE in the PTEG and make sure it is valid.
2218 * 4) Unlock the PTEG control area.
2219 * 5) Go back to the interrupt handler, changing the interrupt
2220 * code to "in vain" which will restore the registers and bail out.
; wasauto: set the valid bit on the new PTE's virtual half.  If the chosen
; slot was empty (cr1 LT), go straight to slamit.  Otherwise invalidate the
; stolen PTE, back-translate its virtual address from the hash position and
; PTE contents (so it can be tlbie'd), and purge it from all TLBs under the
; global TLBIE lock.  603-class CPUs skip tlbsync (per the PVR check).
2223 wasauto: oris r3,r3,0x8000 /* Turn on the valid bit */
2224 blt+ cr1,slamit /* It was empty, go slam it on in... */
2226 lwz r10,0(r6) /* Grab the top part of the PTE */
2227 rlwinm r12,r6,6,4,19 /* Match up the hash to a page boundary */
2228 rlwinm r5,r10,5,4,19 /* Extract the VSID to a page boundary */
2229 rlwinm r10,r10,0,1,31 /* Make it invalid */
2230 xor r12,r5,r12 /* Calculate vaddr */
2231 stw r10,0(r6) /* Invalidate the PTE */
2232 rlwinm r5,r10,7,27,29 ; Move nybble 0 up to subhash position
2233 rlwimi r12,r10,1,0,3 /* Move in the segment portion */
2234 lis r9,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */
2235 xor r5,r5,r10 ; Splooch nybble 0 and 1
2236 rlwimi r12,r10,22,4,9 /* Move in the API */
2237 ori r9,r9,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */
2238 rlwinm r4,r10,27,27,29 ; Get low 3 bits of the VSID for look-aside hash
2240 sync /* Make sure the invalid is stored */
2242 xor r4,r4,r5 ; Finish splooching nybble 0, 1, and the low bits of the VSID
2244 tlbhang: lwarx r5,0,r9 /* Get the TLBIE lock */
2246 rlwinm r4,r4,0,27,29 ; Clean up splooched hash value
2248 mr. r5,r5 /* Is it locked? */
2249 add r4,r4,r8 /* Point to the offset into the PCA area */
2250 li r5,1 /* Get our lock word */
2251 bne- tlbhang /* It's locked, go wait... */
2253 la r4,PCAhash(r4) /* Point to the start of the hash chain for the PTE we're replacing */
2255 stwcx. r5,0,r9 /* Try to get it */
2256 bne- tlbhang /* We was beat... */
2258 mfspr r7,pvr /* Find out what kind of machine we are */
2259 li r5,0 /* Lock clear value */
2260 rlwinm r7,r7,16,16,31 /* Isolate CPU type */
2262 tlbie r12 /* Invalidate it everywhere */
2264 cmplwi r7,3 /* Is this a 603? */
2265 stw r5,0(r9) /* Clear the lock */
2267 beq- its603 /* It's a 603, skip the tlbsync... */
2269 eieio /* Make sure that the tlbie happens first */
2270 tlbsync /* wait for everyone to catch up */
2273 its603: rlwinm. r21,r21,0,0,0 ; See if we just stole an autogenned entry
2274 sync /* Make sure of it all */
2276 bne slamit ; The old was an autogen, time to slam the new in...
; The stolen PTE was not an autogen, so it has a mapping on the PTEG hash
; chain: find it (findold), requeue it to the chain head (MRU), stash the
; stolen PTE's real half - RC bits included - into the mapping, and merge
; the RC bits into the physical entry's master copy if one exists.
; Falling off the chain without a match means the tables are inconsistent
; and we take the bebad choke.
2278 lwz r9,4(r6) /* Get the real portion of old PTE */
2279 lwz r7,0(r4) /* Get the first element. We can't get to here
2280 if we aren't working with a mapping... */
2281 mr r0,r7 ; Save pointer to first element
2283 findold: mr r1,r11 ; Save the previous guy
2284 mr. r11,r7 /* Copy and test the chain */
2285 beq- bebad /* Assume it's not zero... */
2287 lwz r5,mmPTEv(r11) /* See if this is the old active one */
2288 cmplw cr2,r11,r14 /* Check if this is actually the new one */
2289 cmplw r5,r10 /* Is this us? (Note: valid bit kept off in mappings) */
2290 lwz r7,mmhashnext(r11) /* Get the next one in line */
2291 beq- cr2,findold /* Don't count the new one... */
2292 cmplw cr2,r11,r0 ; Check if we are first on the list
2293 bne+ findold /* Not it (and assume the worst)... */
2295 lwz r12,mmphysent(r11) /* Get the pointer to the physical entry */
2296 beq- cr2,nomove ; We are first, no need to requeue...
2298 stw r11,0(r4) ; Chain us to the head
2299 stw r0,mmhashnext(r11) ; Chain the old head to us
2300 stw r7,mmhashnext(r1) ; Unlink us
2302 nomove: li r5,0 /* Clear this on out */
2304 mr. r12,r12 /* Is there a physical entry? */
2305 stw r5,mmPTEent(r11) ; Clear the PTE entry pointer
2306 li r5,pepte1 /* Point to the PTE last half */
2307 stw r9,mmPTEr(r11) ; Squirrel away the whole thing (RC bits are in here)
2309 beq- mrgmrcx ; No physical entry for this one...
2311 rlwinm r11,r9,0,23,24 /* Keep only the RC bits */
2313 mrgmrcx: lwarx r9,r5,r12 /* Get the master copy */
2314 or r9,r9,r11 /* Merge in latest RC */
2315 stwcx. r9,r5,r12 /* Save it back */
2316 bne- mrgmrcx /* If it changed, try again... */
2319 * Here's where we finish up. We save the real part of the PTE, eieio it, to make sure it's
2320 * out there before the top half (with the valid bit set).
2323 slamit: stw r2,4(r6) /* Stash the real part */
2324 li r4,0 /* Get a lock clear value */
2325 eieio /* Erect a barricade */
2326 stw r3,0(r6) /* Stash the virtual part and set valid on */
2328 stw r4,PCAlock(r8) /* Clear the PCA lock */
2330 li r3,T_IN_VAIN /* Say that we handled it */
2331 sync /* Go no further until the stores complete */
2332 #if PERFTIMES && DEBUG
2336 bl EXT(dbgLog2) ; Record debug performance trace
2340 blr /* Back to the fold... */
; bebad: the stolen PTE had no matching mapping on its hash chain - the
; tables are inconsistent, so panic via the firmware Choke call.
2342 bebad: lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */
2343 ori r0,r0,LOW_ADDR(Choke)
2344 sc /* Firmware Heimlich maneuver */
2347 * This walks the hash table or DBATs to locate the physical address of a virtual one.
2348 * The space is provided. If it is the kernel space, the DBATs are searched first. Failing
2349 * that, the hash table is accessed. Zero is returned for failure, so it must be special cased.
2350 * This is usually used for debugging, so we try not to rely
2351 * on anything that we don't have to.
; LRA - Logical (virtual) to Real Address translation.
; In:   r3 = space ID, r4 = virtual address.
; Out:  r3 = real address, or 0 on failure (callers must special-case 0).
; Kernel space is checked against the four DBATs first; otherwise the
; hardware hash table is searched.  Runs with translation and interrupts
; off; the saved MSR (r10) is restored on every exit path.
2354 ENTRY(LRA, TAG_NO_FRAME_USED)
2356 mfsprg r8,2 ; Get feature flags
2357 mfmsr r10 /* Save the current MSR */
2358 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2359 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2360 mtcrf 0x04,r8 ; Set the features
2361 xoris r5,r3,HIGH_ADDR(PPC_SID_KERNEL) /* Clear the top half if equal */
2362 andi. r9,r10,0x7FCF /* Turn off interrupts and translation */
2363 eqv r12,r12,r12 /* Fill the bottom with foxes */
2365 bt pfNoMSRirb,lraNoMSR ; No MSR...
2367 mtmsr r9 ; Translation and all off
2368 isync ; Toss prefetch
2373 li r0,loadMSR ; Get the MSR setter SC
2374 mr r3,r9 ; Get new MSR
2379 cmplwi r5,LOW_ADDR(PPC_SID_KERNEL) /* See if this is kernel space */
2380 rlwinm r11,r3,6,6,25 /* Position the space for the VSID */
2381 isync /* Purge pipe */
2382 bne- notkernsp /* This is not for the kernel... */
2384 mfspr r5,dbat0u /* Get the virtual address and length */
2385 eqv r8,r8,r8 /* Get all foxes */
2386 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2387 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2388 beq- ckbat1 /* not valid, skip this one... */
2389 sub r7,r4,r7 /* Subtract out the base */
2390 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2391 mfspr r6,dbat0l /* Get the real part */
2392 cmplw r7,r8 /* Check if it is in the range */
2393 bng+ fndbat /* Yup, she's a good un... */
2395 ckbat1: mfspr r5,dbat1u /* Get the virtual address and length */
2396 eqv r8,r8,r8 /* Get all foxes */
2397 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2398 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2399 beq- ckbat2 /* not valid, skip this one... */
2400 sub r7,r4,r7 /* Subtract out the base */
2401 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2402 mfspr r6,dbat1l /* Get the real part */
2403 cmplw r7,r8 /* Check if it is in the range */
2404 bng+ fndbat /* Yup, she's a good un... */
2406 ckbat2: mfspr r5,dbat2u /* Get the virtual address and length */
2407 eqv r8,r8,r8 /* Get all foxes */
2408 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2409 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2410 beq- ckbat3 /* not valid, skip this one... */
2411 sub r7,r4,r7 /* Subtract out the base */
2412 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2413 mfspr r6,dbat2l /* Get the real part */
2414 cmplw r7,r8 /* Check if it is in the range */
2415 bng- fndbat /* Yup, she's a good un... */
2417 ckbat3: mfspr r5,dbat3u /* Get the virtual address and length */
2418 eqv r8,r8,r8 /* Get all foxes */
2419 rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */
2420 rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */
2421 beq- notkernsp /* not valid, skip this one... */
2422 sub r7,r4,r7 /* Subtract out the base */
2423 rlwimi r8,r5,15,0,14 /* Get area length - 1 */
2424 mfspr r6,dbat3l /* Get the real part */
2425 cmplw r7,r8 /* Check if it is in the range */
2426 bgt+ notkernsp /* No good... */
2428 fndbat: rlwinm r6,r6,0,0,14 /* Clean up the real address */
2429 mtmsr r10 /* Restore state */
2430 add r3,r7,r6 /* Relocate the offset to real */
2431 isync /* Purge pipe */
2432 blr /* Bye, bye... */
2434 notkernsp: mfspr r5,sdr1 /* Get hash table base and size */
2435 rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */
2436 rlwimi r12,r5,16,0,15 /* Make table size -1 out of mask */
2437 rlwinm r7,r4,26,10,25 /* Isolate the page index */
2438 andc r5,r5,r12 /* Clean up the hash table */
2439 xor r7,r7,r11 /* Get primary hash */
2440 rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */
2441 and r7,r7,r12 /* Wrap the hash */
2442 rlwimi r11,r4,10,26,31 /* Move API into pte ID */
2443 add r5,r7,r5 /* Point to the PTEG */
2444 oris r11,r11,0x8000 /* Slam on valid bit so's we don't match an invalid one */
2446 li r9,8 /* Get the number of PTEs to check */
2447 lwz r6,0(r5) /* Preload the virtual half */
2449 fndpte: subi r9,r9,1 /* Count the pte */
2450 lwz r3,4(r5) /* Get the real half */
2451 cmplw cr1,r6,r11 /* Is this what we want? */
2452 lwz r6,8(r5) /* Start to get the next virtual half */
2453 mr. r9,r9 /* Any more to try? */
2454 addi r5,r5,8 /* Bump to next slot */
2455 beq cr1,gotxlate /* We found what we were looking for... */
2456 bne+ fndpte /* Go try the next PTE... */
2458 mtmsr r10 /* Restore state */
2459 li r3,0 /* Show failure */
2460 isync /* Purge pipe */
2463 gotxlate: mtmsr r10 /* Restore state */
2464 rlwimi r3,r4,0,20,31 /* Cram in the page displacement */
2465 isync /* Purge pipe */
2471 * struct blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr)
2473 * This is used to add a block mapping entry to the MRU list whose top
2474 * node is anchored at bmaps. This is a real address and is also used as
2477 * Overlapping areas are not allowed. If we find one, we return its address and
2478 * expect the upper layers to panic. We only check this for a debug build...
; hw_add_blk(pmap, blokmap *) - add a block mapping to the pmap's bmap MRU
; chain, under the bmap anchor lock, with translation and interrupts off.
; Returns 0 on success; on overlap with an existing block, returns that
; block's address (with bit 30 set if the block is partially removed).
2483 .globl EXT(hw_add_blk)
2487 mfsprg r9,2 ; Get feature flags
2488 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
2489 mfmsr r0 /* Save the MSR */
2490 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2491 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2492 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
2493 mtcrf 0x04,r9 ; Set the features
2494 xor r3,r3,r6 ; Get real address of bmap anchor
2495 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
2496 la r3,PMAP_BMAPS(r3) ; Point to bmap header
2498 bt pfNoMSRirb,habNoMSR ; No MSR...
2500 mtmsr r12 ; Translation and all off
2501 isync ; Toss prefetch
2507 li r0,loadMSR ; Get the MSR setter SC
2508 mr r3,r12 ; Get new MSR
2514 abLck: lwarx r9,0,r3 ; Get the block map anchor and lock
2515 rlwinm. r8,r9,0,31,31 ; Is it locked?
2516 ori r8,r9,1 ; Set the lock
2517 bne- abLckw ; Yeah...
2518 stwcx. r8,0,r3 ; Lock the bmap list
2519 bne- abLck ; Someone else was trying, try again...
2520 b abSXg ; All done...
2524 abLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
2525 beq+ abLck ; Not no more...
2526 lwz r9,0(r3) ; Get lock word again...
2527 b abLckw ; Check it out...
2531 nop ; Force ISYNC to last instruction in IFETCH
2534 abSXg: rlwinm r11,r9,0,0,26 ; Clear out flags and lock
2535 isync ; Make sure we have not used anything yet
2541 lwz r7,bmstart(r4) ; Get start
2542 lwz r8,bmend(r4) ; Get end
2543 mr r2,r11 ; Get chain
2545 abChk: mr. r10,r2 ; End of chain?
2546 beq abChkD ; Yes, chain is ok...
2547 lwz r5,bmstart(r10) ; Get start of current area
2548 lwz r6,bmend(r10) ; Get end of current area
2550 cmplw cr0,r8,r5 ; Is the end of the new before the old?
2551 cmplw cr1,r8,r6 ; Is the end of the new after the old?
2552 cmplw cr6,r6,r7 ; Is the end of the old before the new?
2553 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in old
2554 cmplw cr7,r6,r8 ; Is the end of the old after the new?
2555 lwz r2,bmnext(r10) ; Get pointer to the next
2556 cror cr6_eq,cr6_lt,cr7_gt ; Set cr6_eq if old not in new
2557 crand cr1_eq,cr1_eq,cr6_eq ; Set cr1_eq if no overlap
2558 beq+ cr1,abChk ; Ok check the next...
2560 lwz r8,blkFlags(r10) ; Get the flags
2561 rlwinm. r8,r8,0,blkRembit,blkRembit ; Check the blkRem bit
2562 beq abRet ; Is the mapping partially removed
2563 ori r10,r10,2 ; Indicate that this block is partially removed
2565 stw r9,0(r3) ; Unlock
2566 mtmsr r0 ; Restore xlation and rupts
2567 mr r3,r10 ; Pass back the overlap
2571 abChkD: stw r11,bmnext(r4) ; Chain this on in
2572 rlwimi r4,r9,0,27,31 ; Copy in locks and flags
2573 sync ; Make sure that is done
2575 stw r4,0(r3) ; Unlock and chain the new first one
2576 mtmsr r0 ; Restore xlation and rupts
2577 li r3,0 ; Pass back a no failure return code
2583 * struct blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2585 * This is used to remove a block mapping entry from the list that
2586 * is anchored at bmaps. bmaps is a virtual address and is also used as
2589 * Note that this function clears a single block that contains
2590 * any address within the range sva to eva (inclusive). To entirely
2591 * clear any range, hw_rem_blk must be called repeatedly until it
2594 * The block is removed from the list and all hash table entries
2595 * corresponding to the mapped block are invalidated and the TLB
2596 * entries are purged. If the block is large, this could take
2597 * quite a while. We need to hash every possible address in the
2598 * range and lock down the PCA.
2600 * If we attempt to remove a permanent entry, we will not do it.
2601 * The block address will be ored with 1 and returned.
; hw_rem_blk(pmap, sva, eva) entry: compute the real address of the pmap's
; bmap anchor and drop translation/interrupts (saved MSR kept in r0).
; cr5 is set up to steer the pte-invalidate vs. unlink phases below.
2607 .globl EXT(hw_rem_blk)
2611 mfsprg r9,2 ; Get feature flags
2612 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
2613 mfmsr r0 /* Save the MSR */
2614 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2615 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2616 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
2617 mtcrf 0x04,r9 ; Set the features
2618 xor r3,r3,r6 ; Get real address of bmap anchor
2619 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
2620 la r3,PMAP_BMAPS(r3) ; Point to the bmap chain head
2622 bt pfNoMSRirb,hrbNoMSR ; No MSR...
2624 mtmsr r12 ; Translation and all off
2625 isync ; Toss prefetch
2631 li r0,loadMSR ; Get the MSR setter SC
2632 mr r3,r12 ; Get new MSR
2638 cmp cr5,r0,r7 ; Request to invalidate the ptes
2642 lwz r4,bmstart(r10) ; Get start of current mapping
2643 lwz r5,bmend(r10) ; Get end of current mapping
2644 cmp cr5,r3,r3 ; Request to unlink the mapping
; rbLck: take the bmap anchor lock (same lwarx/stwcx. + passive-spin pattern
; as abLck above); rbSXg then strips flags/lock to get the chain head in r2.
2646 rbLck: lwarx r9,0,r3 ; Get the block map anchor and lock
2647 rlwinm. r8,r9,0,31,31 ; Is it locked?
2648 ori r8,r9,1 ; Set the lock
2649 bne- rbLckw ; Yeah...
2650 stwcx. r8,0,r3 ; Lock the bmap list
2651 bne- rbLck ; Someone else was trying, try again...
2652 b rbSXg ; All done...
2656 rbLckw: rlwinm. r11,r9,0,31,31 ; Check if it is still held
2657 beq+ rbLck ; Not no more...
2658 lwz r9,0(r3) ; Get lock word again...
2659 b rbLckw ; Check it out...
2663 nop ; Force ISYNC to last instruction in IFETCH
2666 rbSXg: rlwinm. r2,r9,0,0,26 ; Clear out flags and lock
2667 mr r10,r3 ; Keep anchor as previous pointer
2668 isync ; Make sure we have not used anything yet
; rbChk: walk the chain for a block intersecting [sva, eva].  A permanent
; block is refused (rbPerm).  Large blocks are torn down at most
; BLKREMMAX pages at a time: bmcurrent tracks progress and blkRem marks a
; partial remove in flight, so callers re-invoke until fully removed.
2670 beq- rbMT ; There is nothing in the list
2672 rbChk: mr r12,r10 ; Save the previous
2673 mr. r10,r2 ; End of chain?
2674 beq rbMT ; Yes, nothing to do...
2675 lwz r11,bmstart(r10) ; Get start of current area
2676 lwz r6,bmend(r10) ; Get end of current area
2678 cmplw cr0,r5,r11 ; Is the end of range before the start of the area?
2679 cmplw cr1,r4,r6 ; Is the start of range after the end of the area?
2680 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
2681 lwz r2,bmnext(r10) ; Get the next one
2682 beq+ cr1,rbChk ; Not this one, check the next...
2684 cmplw cr1,r12,r3 ; Is the current mapping the first one?
2686 bne cr5,rbblkRem ; Do we have to unchain the mapping
2688 bne cr1,rbnFirst ; Yes, is this the first mapping?
2689 rlwimi r9,r2,0,0,26 ; Yes, Change the lock value
2690 ori r2,r9,1 ; Turn on the lock bit
2692 stw r2,bmnext(r12) ; Unchain us
2698 lwz r8,blkFlags(r10) ; Get the flags
2700 rlwinm. r7,r8,0,blkPermbit,blkPermbit ; is this a permanent block?
2702 bne- rbPerm ; This is permanent, do not remove...
2704 rlwinm. r7,r8,0,blkRembit,blkRembit ; is this mapping partially removed
2706 beq rbblkRemcont ; If not, check the max size
2707 lwz r11,bmcurrent(r10) ; If yes, resume for the current page
2709 cmp cr5,r11,r6 ; No partial remove left
2710 beq cr5, rbpendret ; But there is a pending remove
2713 bne rbblkRemcont1 ; Is it the first remove
2715 oris r8,r8,hi16(blkRem) ; Yes
2716 stw r8,blkFlags(r10) ; set the blkRem bit in blkFlags
2719 lis r5,hi16(BLKREMMAX*4096) ; Load maximum size tear down
2720 ori r5,r5,lo16(BLKREMMAX*4096) ; Load maximum size tear down
2721 sub r7,r6,r11 ; Get the remaining size to tear down
2722 cmp cr5,r7,r5 ; Compare against the maximum size
2723 ble cr5,rbfullblk ; If less or equal, go remove the mapping
2725 add r7,r11,r5 ; Add the max size tear down to the current page
2726 stw r7,bmcurrent(r10) ; Update the current page
2727 subi r6,r7,1 ; Set the current end of the partial tear down
2731 stw r6,bmcurrent(r10) ; Update the current page
; Set up to sweep every PTEG that could hold a PTE for the block's range:
; derive the hash table base/mask from SDR1, convert the virtual range to
; PTEG units, and clamp the count if the range wraps the whole table.
; rbHash computes each PCA address from the VSID/page-index hash.
2734 lwz r8,bmspace(r10) ; Get the VSID
2736 stw r9,0(r3) ; Unlock and chain the new first one
2738 eqv r4,r4,r4 ; Fill the bottom with foxes
2739 mfspr r12,sdr1 ; Get hash table base and size
2740 rlwinm r8,r8,6,0,25 ; Align VSID to PTEG
2741 rlwimi r4,r12,16,0,15 ; Make table size - 1 out of mask
2742 andc r12,r12,r4 ; Clean up address of hash table
2743 rlwinm r5,r11,26,6,25 ; Rotate virtual start address into PTEG units
2744 add r12,r12,r4 ; Point to PCA - 1
2745 rlwinm r6,r6,26,6,25 ; Rotate virtual end address into PTEG units
2746 addi r12,r12,1 ; Point to PCA base
2747 sub r6,r6,r5 ; Get the total number of PTEGs to clear
2748 cmplw r6,r4 ; See if this wraps all the way around
2749 blt rbHash ; Nope, length is right
2750 subi r6,r4,32+31 ; Back down to correct length
2752 rbHash: rlwinm r5,r5,0,10,25 ; Keep only the page index
2753 xor r2,r8,r5 ; Hash into table
2754 and r2,r2,r4 ; Wrap into the table
2755 add r2,r2,r12 ; Point right at the PCA
; rbLcka/rbSXga: lock each PTEG in turn, release every autogen slot in its
; PCA allocation word, and invalidate the corresponding PTEs (one "bf"
; branch per slot, steered by the autogen bits loaded into CR via mtcrf).
; rbAintNone drops the PTEG lock and advances to the next PTEG until the
; count in r6 runs out.
2757 rbLcka: lwarx r7,0,r2 ; Get the PTEG lock
2758 mr. r7,r7 ; Is it locked?
2759 bne- rbLckwa ; Yeah...
2760 li r7,1 ; Get the locked value
2761 stwcx. r7,0,r2 ; Take it
2762 bne- rbLcka ; Someone else was trying, try again...
2763 b rbSXga ; All done...
2765 rbLckwa: mr. r7,r7 ; Check if it is already held
2766 beq+ rbLcka ; It is clear...
2767 lwz r7,0(r2) ; Get lock word again...
2770 rbSXga: isync ; Make sure nothing used yet
2771 lwz r7,PCAallo(r2) ; Get the allocation word
2772 rlwinm. r11,r7,8,0,7 ; Isolate the autogenerated PTEs
2773 or r7,r7,r11 ; Release the autogen slots
2774 beq+ rbAintNone ; There are not any here
2775 mtcrf 0xC0,r11 ; Set the branch masks for autogens
2776 sub r11,r2,r4 ; Move back to the hash table + 1
2777 rlwinm r7,r7,0,16,7 ; Clear the autogen field
2778 subi r11,r11,1 ; Point to the PTEG
2779 stw r7,PCAallo(r2) ; Update the flags
2780 li r7,0 ; Get an invalid PTE value
2782 bf 0,rbSlot1 ; No autogen here
2783 stw r7,0x00(r11) ; Invalidate PTE
2784 rbSlot1: bf 1,rbSlot2 ; No autogen here
2785 stw r7,0x08(r11) ; Invalidate PTE
2786 rbSlot2: bf 2,rbSlot3 ; No autogen here
2787 stw r7,0x10(r11) ; Invalidate PTE
2788 rbSlot3: bf 3,rbSlot4 ; No autogen here
2789 stw r7,0x18(r11) ; Invalidate PTE
2790 rbSlot4: bf 4,rbSlot5 ; No autogen here
2791 stw r7,0x20(r11) ; Invalidate PTE
2792 rbSlot5: bf 5,rbSlot6 ; No autogen here
2793 stw r7,0x28(r11) ; Invalidate PTE
2794 rbSlot6: bf 6,rbSlot7 ; No autogen here
2795 stw r7,0x30(r11) ; Invalidate PTE
2796 rbSlot7: bf 7,rbSlotx ; No autogen here
2797 stw r7,0x38(r11) ; Invalidate PTE
2800 rbAintNone: li r7,0 ; Clear this out
2801 sync ; To make SMP happy
2802 addic. r6,r6,-64 ; Decrement the count
2803 stw r7,PCAlock(r2) ; Release the PTEG lock
2804 addi r5,r5,64 ; Move up by adjusted page number
2805 bge+ rbHash ; Not done...
2807 sync ; Make sure the memory is quiet
2810 ; Here we take the easy way out and just purge the entire TLB. This is
2811 ; certainly faster and definitely easier than blasting just the correct ones
2812 ; in the range, we only need one lock and one TLBSYNC. We would hope
2813 ; that most blocks are more than 64 pages (256K) and on every machine
2814 ; up to Book E, 64 TLBIEs will invalidate the entire table.
2817 li r5,64 ; Get number of TLB entries to purge
2818 lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock
2819 li r6,0 ; Start at 0
2820 ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part
2822 rbTlbL: lwarx r2,0,r12 ; Get the TLBIE lock
2823 mr. r2,r2 ; Is it locked?
2824 li r2,1 ; Get our lock value
2825 bne- rbTlbL ; It is locked, go wait...
2826 stwcx. r2,0,r12 ; Try to get it
2827 bne- rbTlbL ; We was beat...
2829 rbTlbN: addic. r5,r5,-1 ; See if we did them all
2830 tlbie r6 ; Invalidate it everywhere
2831 addi r6,r6,0x1000 ; Up to the next page
2832 bgt+ rbTlbN ; Make sure we have done it all...
2834 mfspr r5,pvr ; Find out what kind of machine we are
2835 li r2,0 ; Lock clear value
2837 rlwinm r5,r5,16,16,31 ; Isolate CPU type
2838 cmplwi r5,3 ; Is this a 603?
2839 sync ; Make sure all is quiet
2840 beq- rbits603a ; It is a 603, skip the tlbsync...
2842 eieio ; Make sure that the tlbie happens first
2843 tlbsync ; wait for everyone to catch up
2846 rbits603a: sync ; Wait for quiet again
2847 stw r2,0(r12) ; Unlock invalidates
2849 sync ; Make sure that is done
2851 ble cr5,rbunlink ; If all ptes are flushed, go unlink the mapping
2852 mtmsr r0 ; Restore xlation and rupts
2853 mr r3,r10 ; Pass back the removed block in progress
2854 ori r3,r3,2 ; Indicate that the block remove isn't completed yet
; Exit paths.  The first (label elided in this view) handles a still-pending
; partial remove: unlock and return the block address with bit 30 set.
; rbMT: nothing found; rbPerm: refused permanent block (address | 1);
; rbDone: block fully removed, return its address.
2859 stw r9,0(r3) ; Unlock
2860 mtmsr r0 ; Restore xlation and rupts
2861 mr r3,r10 ; Pass back the removed block in progress
2862 ori r3,r3,2 ; Indicate that the block remove isn't completed yet
2867 rbMT: stw r9,0(r3) ; Unlock
2868 mtmsr r0 ; Restore xlation and rupts
2869 li r3,0 ; Say we did not find one
2873 rbPerm: stw r9,0(r3) ; Unlock
2874 mtmsr r0 ; Restore xlation and rupts
2875 ori r3,r10,1 ; Say we did not remove it
2879 rbDone: stw r9,0(r3) ; Unlock
2880 mtmsr r0 ; Restore xlation and rupts
2881 mr r3,r10 ; Pass back the removed block
2886 * hw_select_mappings(struct mappingflush *mappingflush)
2889 * Output: up to 8 user mappings
2891 * hw_select_mappings() scans every PCA mapping hash lists and select
2892 * the last user mapping if it exists.
2897 .globl EXT(hw_select_mappings)
; hw_select_mappings(mappingflush *): with translation/interrupts off and
; the PTEG locked, scan all 8 PCA mapping hash lists and record the last
; user (non-kernel-VSID) mapping of each list - pmap and reconstructed
; virtual address - into the caller's mappingflush array, bumping
; MFmappingcnt for each one found.
2899 LEXT(hw_select_mappings)
2900 mr r5,r3 ; Get the mapping flush addr
2901 mfmsr r12 ; Get the MSR
2902 rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
2903 rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
2904 mfsprg r9,2 ; Get feature flags
2905 andi. r0,r12,0x7FCF ; Disable translation and interruptions
2906 mtcrf 0x04,r9 ; Set the features
2907 bt pfNoMSRirb,hvmNoMSR ; No MSR...
2912 mr r3,r0 ; Get the new MSR
2913 li r0,loadMSR ; Get the MSR setter SC
2917 li r11,1 ; Get the locked value
2920 lwz r3,MFpcaptr(r5) ; Get the PCA pointer
2921 lwarx r10,0,r3 ; Get the PTEG lock
2922 mr. r10,r10 ; Is it locked?
2923 bne- hvmptegLckwx ; Yeah...
2924 stwcx. r11,0,r3 ; Take it
2925 bne- hvmptegLckx ; Someone else was trying, try again...
2926 b hvmptegSXgx ; All done...
2931 mr. r10,r10 ; Check if it is already held
2932 beq+ hvmptegLckx ; It's clear...
2933 lwz r10,0(r3) ; Get lock word again...
2934 b hvmptegLckwx ; Wait...
2939 isync ; Make sure we haven't used anything yet
2941 li r11,8 ; set count to 8
2943 lwz r6,PCAhash(r3) ; load the first mapping hash list
2944 la r12,PCAhash(r3) ; Point to the mapping hash area
2945 la r4,MFmapping(r5) ; Point to the mapping flush mapping area
2947 stw r7,MFmappingcnt(r5) ; Set the current count to 0
2949 li r10,0 ; Mapping test
2952 mr. r6,r6 ; Test if the hash list current pointer is zero
2953 beq hvmfindmapret ; Did we hit the end of the hash list
2954 lwz r7,mmPTEv(r6) ; Pick up our virtual ID
2955 rlwinm r8,r7,5,0,19 ; Pick VSID 20 lower bits
2957 beq hvmfindmapnext ; Skip Kernel VSIDs
2958 rlwinm r8,r7,1,0,3 ; Extract the Segment index
2959 rlwinm r9,r7,22,4,9 ; Extract API 6 upper bits
2960 or r8,r8,r9 ; Add to the virtual address
2961 rlwinm r9,r7,31,6,25 ; Pick VSID 19 lower bits
2962 xor r9,r9,r3 ; Exclusive or with the PCA address
2963 rlwinm r9,r9,6,10,19 ; Extract API 10 lower bits
2964 or r8,r8,r9 ; Add to the virtual address
2966 stw r8,4(r4) ; Store the virtual address
2967 lwz r8,mmpmap(r6) ; Get the pmap
2968 stw r8,0(r4) ; Store the pmap
2969 li r10,1 ; Found one
2972 lwz r6,mmhashnext(r6) ; Pick up next mapping block
2973 b hvmfindmap ; Scan the next mapping
2975 mr. r10,r10 ; Found mapping
2976 beq hvmnexthashprep ; If not, do not update the mappingflush array
2977 lwz r7,MFmappingcnt(r5) ; Get the current count
2978 addi r7,r7,1 ; Increment the current count
2979 stw r7,MFmappingcnt(r5) ; Store the current count
2980 addi r4,r4,MFmappingSize ; Point to the next mapping flush entry
2982 addi r12,r12,4 ; Load the next hash list
2983 lwz r6,0(r12) ; Load the next hash list entry
2984 subi r11,r11,1 ; Decrement hash list index
2985 mr. r11,r11 ; Test for a remaining hash list
2986 bne hvmnexthash ; Loop to scan the next hash list
2989 stw r10,0(r3) ; Unlock the hash list
2990 mtmsr r0 ; Restore translation and interruptions
2995 * vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va)
2997 * This is used to translate a virtual address within a block mapping entry
2998 * to a physical address. If not found, 0 is returned.
;
;			vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va)
;
;			Searches the pmap's block-map (bmap) chain, under the chain anchor
;			lock and with translation/interruptions off, for an entry whose
;			[bmstart, bmend] range covers va.  Returns the physical address
;			(real page from the PTE plus the offset into the block), or 0 if
;			no block mapping covers va.
;
3003 .globl EXT(hw_cvp_blk)
3007 mfsprg r9,2 ; Get feature flags
3008 lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation
3009 mfmsr r0 /* Save the MSR */
3010 rlwinm r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3011 rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3012 rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */
3013 mtcrf 0x04,r9 ; Set the features
3014 xor r3,r3,r6 ; Get real address of bmap anchor
3015 rlwinm r12,r12,0,28,25 /* Clear IR and DR */
3016 la r3,PMAP_BMAPS(r3) ; Point to chain header
3018 bt pfNoMSRirb,hcbNoMSR ; No MSR...
3020 mtmsr r12 ; Translation and all off
3021 isync ; Toss prefetch
; Processors without direct MSR access set it via the loadMSR system call
3027 li r0,loadMSR ; Get the MSR setter SC
3028 mr r3,r12 ; Get new MSR
; Take the low bit of the bmap anchor word as the chain lock
3034 cbLck: lwarx r9,0,r3 ; Get the block map anchor and lock
3035 rlwinm. r8,r9,0,31,31 ; Is it locked?
3036 ori r8,r9,1 ; Set the lock
3037 bne- cbLckw ; Yeah...
3038 stwcx. r8,0,r3 ; Lock the bmap list
3039 bne- cbLck ; Someone else was trying, try again...
3040 b cbSXg ; All done...
3044 cbLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held
3045 beq+ cbLck ; Not no more...
3046 lwz r9,0(r3) ; Get lock word again...
3047 b cbLckw ; Check it out...
3051 nop ; Force ISYNC to last instruction in IFETCH
3057 cbSXg: rlwinm. r11,r9,0,0,26 ; Clear out flags and lock to get first bmap pointer
3058 li r2,0 ; Assume we do not find anything
3059 isync ; Make sure we have not used anything yet
; Walk the bmap chain looking for a block whose range covers r4 (the va)
3061 cbChk: mr. r11,r11 ; Is there more?
3062 beq- cbDone ; No more...
3063 lwz r5,bmstart(r11) ; Get the bottom of range
3064 lwz r12,bmend(r11) ; Get the top of range
3065 cmplw cr0,r4,r5 ; Are we before the entry?
3066 cmplw cr1,r4,r12 ; Are we after the entry?
3067 cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range
3068 beq- cr1,cbNo ; We are not in the range...
3070 lwz r2,bmPTEr(r11) ; Get the real part of the PTE
3071 sub r5,r4,r5 ; Get offset into area
3072 rlwinm r2,r2,0,0,19 ; Clean out everything but the page
3073 add r2,r2,r5 ; Adjust the real address
3075 cbDone: stw r9,0(r3) ; Unlock it, we are done with it (no sync needed)
3076 mtmsr r0 ; Restore translation and interrupts...
3077 isync ; Make sure it is on
3078 mr r3,r2 ; Set return physical address (0 if not found)
3083 cbNo: lwz r11,bmnext(r11) ; Link next
3084 b cbChk ; Check it out...
3088 * hw_set_user_space(pmap)
3089 * hw_set_user_space_dis(pmap)
3091 * Indicate whether memory space needs to be switched.
3092 * We really need to turn off interrupts here, because we need to be non-preemptable
3094 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
3095 * register usage here. The VMM switch code in vmachmon.s that calls this
3096 * knows what registers are in use. Check that code if these change.
;
;			hw_set_user_space(pmap)
;
;			Switches this processor's user address space to the given pmap:
;			stores the pmap's space ID and the pmap's real (physical) address
;			into the per_proc area.  Interruptions are disabled around the two
;			stores so the pair is updated without preemption.
;
3102 .globl EXT(hw_set_user_space)
3104 LEXT(hw_set_user_space)
3106 mfmsr r10 /* Get the current MSR */
3107 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3108 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3109 rlwinm r9,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off 'rupts */
3110 mtmsr r9 /* Disable 'em */
3111 lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation
3112 lwz r4,PMAP_SPACE(r3) ; Get the space
3113 mfsprg r6,0 /* Get the per_proc_info address */
3114 xor r3,r3,r7 ; Get real address of bmap anchor
3115 stw r4,PP_USERSPACE(r6) /* Show our new address space */
3116 stw r3,PP_USERPMAP(r6) ; Show our real pmap address
3117 mtmsr r10 /* Restore interruptions */
;
;			hw_set_user_space_dis(pmap)
;
;			Same job as hw_set_user_space, for callers that have already
;			disabled interruptions: no MSR manipulation is done here.
;			Mind the register usage -- the VMM switch code in vmachmon.s
;			depends on which registers this routine touches.
;
3121 .globl EXT(hw_set_user_space_dis)
3123 LEXT(hw_set_user_space_dis)
3125 lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation
3126 lwz r4,PMAP_SPACE(r3) ; Get the space
3127 mfsprg r6,0 ; Get the per_proc_info address
3128 xor r3,r3,r7 ; Get real address of bmap anchor
3129 stw r4,PP_USERSPACE(r6) ; Show our new address space
3130 stw r3,PP_USERPMAP(r6) ; Show our real pmap address
3134 /* struct mapping *hw_cpv(struct mapping *mp) - Converts a physical mapping CB address to virtual
;
;			hw_cpv: convert a physical mapping control-block address to its
;			virtual address by XORing with the owning mapping block's mbvrswap
;			value.  DR and EE are turned off so mbvrswap can be fetched through
;			the physical address.  A 0 input falls through and returns 0.
;
3143 rlwinm. r4,r3,0,0,19 ; Round back to the mapping block allocation control block
3144 mfmsr r10 ; Get the current MSR
3145 beq- hcpvret ; Skip if we are passed a 0...
3146 rlwinm r10,r10,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3147 rlwinm r10,r10,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3148 andi. r9,r10,0x7FEF ; Turn off interrupts and data translation
3149 mtmsr r9 ; Disable DR and EE
3152 lwz r4,mbvrswap(r4) ; Get the conversion value
3153 mtmsr r10 ; Interrupts and DR back on
3155 xor r3,r3,r4 ; Convert to virtual (mbvrswap flips v<->r)
3157 hcpvret: rlwinm r3,r3,0,0,26 ; Clean out any flags
3161 /* struct mapping *hw_cvp(struct mapping *mp) - Converts a virtual mapping CB address to physical
3163 * Translation must be on for this
;
;			hw_cvp: convert a virtual mapping control-block address to physical
;			by XORing with the owning mapping block's mbvrswap value.  mbvrswap
;			is read through the virtual address, so translation must be on.
;
3172 rlwinm r4,r3,0,0,19 ; Round back to the mapping block allocation control block
3173 rlwinm r3,r3,0,0,26 ; Clean out any flags
3174 lwz r4,mbvrswap(r4) ; Get the conversion value
3175 xor r3,r3,r4 ; Convert to physical (mbvrswap flips v<->r)
3179 /* int mapalc(struct mappingblok *mb) - Finds, allocates, and checks a free mapping entry in a block
3181 * Lock must already be held on mapping block list
3182 * returns 0 if all slots filled.
3183 * returns n if a slot is found and it is not the last
3184 * returns -n if a slot is found and it is the last
3185 * when n and -n are returned, the corresponding bit is cleared
;
;			int mapalc(struct mappingblok *mb)
;
;			Allocates the first free slot in the block's 128-bit mbfree bitmap
;			(four 32-bit words; a 1 bit marks a free slot -- inferred from the
;			cntlzw/andc sequence below).  Returns the slot number n, 0 if every
;			slot is already taken, or -n when the slot just taken was the last
;			free one.  Caller must already hold the mapping block list lock.
;
;			Note: cntlzw of an all-zero word yields 32, and srw by 32 produces
;			0 with EQ set, which is how "no free bit in this word" is detected.
;
3194 lwz r4,mbfree(r3) ; Get the first mask
3195 lis r0,0x8000 ; Get the mask to clear the first free bit
3196 lwz r5,mbfree+4(r3) ; Get the second mask
3197 mr r12,r3 ; Save the return
3198 cntlzw r8,r4 ; Get first free field
3199 lwz r6,mbfree+8(r3) ; Get the third mask
3200 srw. r9,r0,r8 ; Get bit corresponding to first free one
3201 lwz r7,mbfree+12(r3) ; Get the fourth mask
3202 cntlzw r10,r5 ; Get first free field in second word
3203 andc r4,r4,r9 ; Turn it off
3204 bne malcfnd0 ; Found one...
3206 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
3207 cntlzw r11,r6 ; Get first free field in third word
3208 andc r5,r5,r9 ; Turn it off
3209 bne malcfnd1 ; Found one...
3211 srw. r9,r0,r11 ; Get bit corresponding to first free one in third word
3212 cntlzw r10,r7 ; Get first free field in fourth word
3213 andc r6,r6,r9 ; Turn it off
3214 bne malcfnd2 ; Found one...
3216 srw. r9,r0,r10 ; Get bit corresponding to first free one in fourth word
3217 li r3,0 ; Assume abject failure
3218 andc r7,r7,r9 ; Turn it off
3219 beqlr ; There are none any left...
3221 addi r3,r10,96 ; Set the correct bit number (fourth word: 96 + bit index)
3222 stw r7,mbfree+12(r12) ; Actually allocate the slot
; Common exit: if all four words are now zero, negate the slot number to
; tell the caller this was the last free slot in the block
3224 mapafin: or r4,r4,r5 ; Merge the first two allocation maps
3225 or r6,r6,r7 ; Then the last two
3226 or. r4,r4,r6 ; Merge both halves
3227 bnelr+ ; Return if some left for next time...
3229 neg r3,r3 ; Indicate we just allocated the last one
3232 malcfnd0: stw r4,mbfree(r12) ; Actually allocate the slot
3233 mr r3,r8 ; Set the correct bit number
3234 b mapafin ; Exit now...
3236 malcfnd1: stw r5,mbfree+4(r12) ; Actually allocate the slot
3237 addi r3,r10,32 ; Set the correct bit number (second word: 32 + bit index)
3238 b mapafin ; Exit now...
3240 malcfnd2: stw r6,mbfree+8(r12) ; Actually allocate the slot
3241 addi r3,r11,64 ; Set the correct bit number (third word: 64 + bit index)
3242 b mapafin ; Exit now...
3246 * Log out all memory usage
;
;			logmem: debugging dump of memory-management state.  With translation
;			and interruptions off, copies the hash table + PCA, the phys_ent
;			table, and the mapping blocks into a save area laid down at the end
;			of defined memory (starting at physical address mem_size), preceded
;			by a header of sizes and pointers.  Bails out if actual memory does
;			not extend past mem_size, since there would be no room for the copy.
;
;			NOTE(review): the embedded numbering shows large gaps here -- the
;			copyhash/copyphys/copymaps loop bodies are only partially visible
;			in this excerpt, so only the lines shown are documented.
;
3254 mfmsr r2 ; Get the MSR
3255 lis r10,hi16(EXT(DebugWork)) ; High part of area
3256 rlwinm r2,r2,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
3257 lis r12,hi16(EXT(mem_actual)) ; High part of actual
3258 rlwinm r2,r2,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
3259 andi. r0,r2,0x7FCF ; Interrupts and translation off
3260 ori r10,r10,lo16(EXT(DebugWork)) ; Get the entry
3261 mtmsr r0 ; Turn stuff off
3262 ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual
3267 stw r0,4(r10) ; Force logging off
3268 lwz r0,0(r12) ; Get the end of memory
3270 lis r12,hi16(EXT(mem_size)) ; High part of defined memory
3271 ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory
3272 lwz r12,0(r12) ; Make it end of defined
3274 cmplw r0,r12 ; Is there room for the data?
3275 ble- logmemexit ; No, do not even try...
; Save-area header: r12 now holds the mem_size value, used as the base
3277 stw r12,0(r12) ; Set defined memory size
3278 stw r0,4(r12) ; Set the actual amount of memory
3280 lis r3,hi16(EXT(hash_table_base)) ; Hash table address
3281 lis r4,hi16(EXT(hash_table_size)) ; Hash table size
3282 lis r5,hi16(EXT(pmap_mem_regions)) ; Memory regions
3283 lis r6,hi16(EXT(mapCtl)) ; Mappings
3284 ori r3,r3,lo16(EXT(hash_table_base))
3285 ori r4,r4,lo16(EXT(hash_table_size))
3286 ori r5,r5,lo16(EXT(pmap_mem_regions))
3287 ori r6,r6,lo16(EXT(mapCtl))
3290 lwz r5,4(r5) ; Get the pointer to the phys_ent table
3291 lwz r6,0(r6) ; Get the pointer to the current mapping block
3292 stw r3,8(r12) ; Save the hash table address
3293 stw r4,12(r12) ; Save the hash table size
3294 stw r5,16(r12) ; Save the physent pointer
3295 stw r6,20(r12) ; Save the mappings
3297 addi r11,r12,0x1000 ; Point to area to move hash table and PCA
3299 add r4,r4,r4 ; Double size for both (hash table plus PCA)
3301 copyhash: lwz r7,0(r3) ; Copy both of them
; (copyhash loop body elided in this excerpt)
3314 rlwinm r4,r12,20,12,31 ; Get number of phys_ents
3316 copyphys: lwz r7,0(r5) ; Copy physents
; (copyphys loop body elided in this excerpt)
3325 addi r11,r11,4095 ; Round up to next page
3326 rlwinm r11,r11,0,0,19
3328 lwz r4,4(r6) ; Get the size of the mapping area
3330 copymaps: lwz r7,0(r6) ; Copy the mappings
; (copymaps loop body elided in this excerpt)
3343 sub r11,r11,r12 ; Get the total length we saved
3344 stw r11,24(r12) ; Save the size
3346 logmemexit: mtmsr r2 ; Back to normal