/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 * 
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 * 
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	This file is used to maintain the virtual to real mappings for a PowerPC machine.
 *	The code herein is primarily used to bridge between the pmap layer and the hardware layer.
 *	Currently, some of the function of this module is contained within pmap.c.  We may want to move
 *	all of this into it (or most anyway) for the sake of performance.  We shall see as we write it.
 *
 *	We also depend upon the structure of the phys_entry control block.  We do put some processor
 *	specific stuff in there.
 */
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <kern/thread_act.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <ppc/pmap_internals.h>

#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ddb/db_output.h>

#include <ppc/POWERMAC/video_console.h>			/* (TEST/DEBUG) */
#if PERFTIMES && DEBUG
#define debugLog2(a, b, c) dbgLog2(a, b, c)
#else
#define debugLog2(a, b, c)
#endif
vm_map_t		mapping_map = VM_MAP_NULL;

unsigned int	incrVSID = 0;						/* VSID increment value */
unsigned int	mappingdeb0 = 0;
unsigned int	mappingdeb1 = 0;
extern unsigned int hash_table_size;
extern vm_offset_t mem_size;
/*
 *	ppc_prot translates from the mach representation of protections to the PPC version.
 *	We also allow for a direct setting of the protection bits. This extends the mach
 *	concepts to allow the greater control we need for Virtual Machines (VMM).
 *	Calculating it like this saves a memory reference - and maybe a couple of microseconds.
 *	It eliminates the use of this table:
 *	unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
 */

#define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
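
/*
 *	Illustrative sketch only (not part of the build): the constant 0xE4E4AFAC simply packs the
 *	sixteen two-bit entries of the old ppc_prot table shown above, two bits per Mach protection
 *	value.  A hypothetical self-check of that equivalence could look like this:
 */
#if 0	/* (TEST/DEBUG) example only */
static void ppc_prot_check(void) {
	static const unsigned char ppc_prot_tab[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
	int p;

	for(p = 0; p < 16; p++) {						/* Walk every Mach protection encoding */
		if(ppc_prot(p) != ppc_prot_tab[p]) panic("ppc_prot: mismatch at %d\n", p);	/* Macro and table must agree */
	}
}
#endif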
/*
 *	About PPC VSID generation:
 *
 *	This function is called to generate an address space ID. This space ID must be unique within
 *	the system.  For the PowerPC, it is used to build the VSID.  We build a VSID in the following
 *	way:  space ID << 4 | segment.  Since a VSID is 24 bits, and out of that, we reserve the last
 *	4, we can have 2^20 (1M) unique IDs.  Each pmap has a unique space ID, so we should be able
 *	to have 1M pmaps at a time, although we couldn't anyway; we'd run out of memory way before then.  The
 *	problem is that only a certain number of pmaps are kept in a free list and if that is full,
 *	they are released.  This causes us to lose track of what space IDs are free to be reused.
 *	We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
 *	when the space ID wraps, or 4) scan the list of pmaps and find a free one.
 *
 *	Yet another consideration is the hardware use of the VSID.  It is used as part of the hash
 *	calculation for virtual address lookup.  An improperly chosen value could potentially cause
 *	too many hashes to hit the same bucket, causing PTEG overflows.  The actual hash function
 *	is (page index XOR vsid) mod number of ptegs.  For a 32MB machine, using the suggested
 *	hash table size, there are 2^13 (8192) PTEGs.  Remember, though, that the bottom 4 bits
 *	are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
 *	before we start hashing to the same buckets with the same vaddrs.  Also, within a space ID,
 *	every 8192 pages (32MB) within a segment will hash to the same bucket.  That's 8 collisions
 *	per segment.  So, a scan of every page for 256MB would fill 32 PTEGs completely, but
 *	with no overflow.  I don't think that this is a problem.
 *
 *	There may be a problem with the space ID, though. A new space ID is generated (mainly)
 *	whenever there is a fork.  There shouldn't really be any problem because (for a 32MB
 *	machine) we can have 512 pmaps and still not have hash collisions for the same address.
 *	The potential problem, though, is if we get long-term pmaps that have space IDs that are
 *	the same modulo 512.  We can reduce this problem by having the segment number be bits
 *	0-3 of the space ID rather than 20-23.  Doing this means that, in effect, corresponding
 *	vaddrs in different segments hash to the same PTEG.  While this is somewhat of a problem,
 *	I don't think that it is as significant as the other, so I'll make the space ID
 *	with segment first.
 *
 *	The final, and biggest, problem is the wrap, which will happen every 2^20 space IDs.
 *	While this is a problem that should only happen in periods counted in weeks, it can and
 *	will happen.  This is assuming a monotonically increasing space ID. If we were to search
 *	for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
 *	That's pretty unlikely to happen.  There couldn't be enough storage to support a million tasks.
 *
 *	So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
 *	locked by free_pmap_lock) that is sorted in VSID sequence order.
 *
 *	Whenever we need a VSID, we walk the list looking for the next in the sequence from
 *	the last that was freed.  Then we allocate that.
 *
 *	NOTE: We must be called with interruptions off and free_pmap_lock held.
 */
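
/*
 *	Illustrative sketch only (not part of the build): a worked instance of the composition and
 *	hash described above, using hypothetical values and the 8192-PTEG table size assumed above.
 */
#if 0	/* (TEST/DEBUG) example only */
static unsigned int vsid_hash_example(void) {
	unsigned int space, va, vsid, pteg;

	space = 0x00001;								/* Hypothetical 20-bit space ID */
	va    = 0x20004000;								/* Hypothetical virtual address */
	vsid  = (space << 4) | (va >> 28);				/* space ID << 4 | segment = 0x00000012 */
	pteg  = (((va >> 12) & 0xFFFF) ^ vsid) & (8192 - 1);	/* (page index XOR vsid) mod number of PTEGs */
	return pteg;									/* 0x00000016 for these values */
}
#endif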
/*
 *	Do anything that needs to be done before the mapping system can be used.
 *	Hash table must be initialized before we call this.
 *
 *	Calculate the SID increment.  Currently we use size^(1/2) + size^(1/4) + 1;
 */

void mapping_init(void) {

	unsigned int tmp;

	__asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size));	/* Get number of leading 0s */

	incrVSID = 1 << ((32 - tmp + 1) >> 1);			/* Get ceiling of sqrt of table size */
	incrVSID |= 1 << ((32 - tmp + 1) >> 2);			/* Get ceiling of quadroot of table size */
	incrVSID |= 1;									/* Set bit and add 1 */
	return;

}
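
/*
 *	Illustrative sketch only (not part of the build): a worked instance of the increment
 *	calculation above for an assumed 512KB (0x00080000) hash table.
 *
 *		cntlzw(0x00080000) = 12, so (32 - 12 + 1) = 21
 *		incrVSID  = 1 << (21 >> 1) = 1 << 10 = 0x400
 *		incrVSID |= 1 << (21 >> 2) = 1 << 5  = 0x020
 *		incrVSID |= 1, giving 0x421 as the VSID stride
 */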
/*
 *	mapping_remove(pmap_t pmap, vm_offset_t va);
 *		Given a pmap and virtual address, this routine finds the mapping and removes it from
 *		both its PTEG hash list and the physical entry list.  The mapping block will be added to
 *		the free list.  If the free list threshold is reached, garbage collection will happen.
 *		We also kick back a return code to say whether or not we had one to remove.
 *
 *		We have a strict ordering here:  the mapping must be removed from the PTEG hash list before
 *		it can be removed from the physical entry list.  This allows us to get by with only the PTEG
 *		hash lock at page fault time. The physical entry lock must be held while we remove the mapping
 *		from both lists. The PTEG lock is one of the lowest level locks.  No PTE fault, interruptions,
 *		losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
 *		It's just that simple!
 *
 *		When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
 *		However, a mapping's order on the PTEG hash chain is not.  The interrupt handler uses the PTEG
 *		lock to control the hash chain and may move the position of the mapping for MRU calculations.
 *
 *		Note that mappings do not need to point to a physical entry.  When they don't, it indicates
 *		the mapping is outside of physical memory and usually refers to a memory mapped device of
 *		some sort.  Naturally, we can't lock what we don't have, so the phys entry lock and unlock
 *		routines return normally, but don't do anything.
 */
boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) {	/* Remove a single mapping for this VADDR.
														   Returns TRUE if a mapping was found to remove */

	mapping		*mp, *mpv;
	register blokmap	*blm;
	spl_t		s;
	unsigned int *useadd, *useaddr;
	int i;

	debugLog2(1, va, pmap->space);						/* start mapping_remove */

	s=splhigh();										/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, va);				/* Lock the physical entry for this mapping */

	if(!mp) {											/* Did we find one? */
		splx(s);										/* Allow 'rupts now */
		if(mp = (mapping *)hw_rem_blk(pmap, va, va)) {	/* No normal pages, try to remove an odd-sized one */

			if((unsigned int)mp & 1) {					/* Make sure we don't unmap a permanent one */
				blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC));	/* Get virtual address */
				panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
					pmap, va, blm);
			}
			while ((unsigned int)mp & 2)
				mp = (mapping *)hw_rem_blk(pmap, va, va);
			blm = (blokmap *)hw_cpv(mp);				/* (TEST/DEBUG) */
			kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
				blm, blm->start, blm->end, blm->PTEr);
			mapping_free(hw_cpv(mp));					/* Release it */
			debugLog2(2, 1, 0);							/* End mapping_remove */
			return TRUE;								/* Tell them we did it */
		}
		debugLog2(2, 0, 0);								/* end mapping_remove */
		return FALSE;									/* Didn't find any, return FALSE... */
	}

	if((unsigned int)mp & 1) {							/* Did we timeout? */
		panic("mapping_remove: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);										/* Restore the interrupt level */
		return FALSE;									/* Bad hair day, return FALSE... */
	}

	mpv = hw_cpv(mp);									/* Get virtual address of mapping */
#if DEBUG
	if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
	(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

	useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];	/* Point to slot to bump */
	useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
	(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

	for(i = 0; i < (pmapUsageMask + 1); i++) {			/* (TEST/DEBUG) */
		if((mpv->pmap->pmapUsage[i]) > 8192) {			/* (TEST/DEBUG) */
			panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
		}
	}

	hw_rem_map(mp);										/* Remove the corresponding mapping */

	if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock physical entry associated with mapping */

	splx(s);											/* Was there something you needed? */

	mapping_free(mpv);									/* Add mapping to the free list */
	debugLog2(2, 1, 0);									/* end mapping_remove */
	return TRUE;										/* Tell them we did it */
}
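
/*
 *	Illustrative sketch only (not part of the build): typical use of the return code above.
 */
#if 0	/* (TEST/DEBUG) example only */
static void mapping_remove_example(pmap_t pmap, vm_offset_t va) {
	if(!mapping_remove(pmap, va))						/* Drop the page mapping if one exists */
		kprintf("mapping_remove_example: nothing mapped at %08X\n", va);	/* Nothing there, nothing freed */
}
#endif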
/*
 *	mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
 *
 *	This guy releases any mappings that exist for a physical page on a specified map.
 *	We get the lock on the phys_entry, and hold it throughout this whole routine.
 *	That way, no one can change the queue out from underneath us.  We keep fetching
 *	the physent's mapping anchor until it is null, then we're done.
 *
 *	For each mapping, we call the remove routine to remove it from the PTEG hash list and
 *	decrement the pmap's residency count.  Then we release the mapping back to the free list.
 */
void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) {	/* Remove all mappings from specified pmap for this physent */

	mapping		*mp, *mp_next, *mpv;
	spl_t		s;
	unsigned int *useadd, *useaddr, uindx;

	s=splhigh();										/* Don't bother me */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);				/* Complain about timeout */
	}

	mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);

	while(mp) {											/* Keep going so long as there's another */

		mpv = hw_cpv(mp);								/* Get the virtual address */
		if(mpv->pmap != pmap) {
			mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
			continue;
		}
#if DEBUG
		if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
		(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

		uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);	/* Join seg # and top 2 bits of API */
		useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];	/* Point to slot to bump */
		useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
		(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

		mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
		hw_rem_map(mp);									/* Remove the mapping */
		mapping_free(mpv);								/* Add mapping to the free list */
		mp = mp_next;									/* On to the next one */
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	splx(s);
	return;
}
/*
 *	mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
 *
 *	This guy releases any mappings that exist for a physical page.
 *	We get the lock on the phys_entry, and hold it throughout this whole routine.
 *	That way, no one can change the queue out from underneath us.  We keep fetching
 *	the physent's mapping anchor until it is null, then we're done.
 *
 *	For each mapping, we call the remove routine to remove it from the PTEG hash list and
 *	decrement the pmap's residency count.  Then we release the mapping back to the free list.
 */
void mapping_purge(struct phys_entry *pp) {				/* Remove all mappings for this physent */

	mapping		*mp, *mpv;
	spl_t		s;
	unsigned int *useadd, *useaddr, uindx;
	int i;

	s=splhigh();										/* Don't bother me */
	debugLog2(3, pp->pte1, 0);							/* start mapping_purge */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);				/* Complain about timeout */
	}

	while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {	/* Keep going so long as there's another */

		mpv = hw_cpv(mp);								/* Get the virtual address */
#if DEBUG
		if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
		(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

		uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);	/* Join segment number and top 2 bits of the API */
		useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];	/* Point to slot to bump */
		useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
		(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

		for(i = 0; i < (pmapUsageMask + 1); i++) {		/* (TEST/DEBUG) */
			if((mpv->pmap->pmapUsage[i]) > 8192) {		/* (TEST/DEBUG) */
				panic("mapping_purge: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
					i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
			}
		}

		hw_rem_map(mp);									/* Remove the mapping */
		mapping_free(mpv);								/* Add mapping to the free list */
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	debugLog2(4, pp->pte1, 0);							/* end mapping_purge */
	splx(s);											/* Was there something you needed? */
	return;												/* Tell them we did it */
}
/*
 *	mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
 *
 *	This routine takes the given parameters, builds a mapping block, and queues it into the
 *	correct lists.
 *
 *	The pp parameter can be null.  This allows us to make a mapping that is not
 *	associated with any physical page.  We may need this for certain I/O areas.
 *
 *	If the phys_entry address is null, we neither lock nor chain into it.
 *	If locked is 1, we already hold the lock on the phys_entry and won't get nor release it.
 */
mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) {	/* Make an address mapping */

	register mapping *mp, *mpv;
	unsigned int *useadd, *useaddr;
	spl_t		s;
	int i;

	debugLog2(5, va, pa);								/* start mapping_make */

	mpv = mapping_alloc();								/* Get a spare mapping block */

	mpv->pmap = pmap;									/* Initialize the pmap pointer */
	mpv->physent = pp;									/* Initialize the pointer to the physical entry */
	mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the PTE */
	mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F);	/* Build the VSID */

	s=splhigh();										/* Don't bother from now on */

	mp = hw_cvp(mpv);									/* Get the physical address of this */

	if(pp && !locked) {									/* Is there a physical entry? Or do we already hold the lock? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
				pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
		}
	}

	if(pp) {											/* See if there is a physical entry */
		mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);	/* Move the old anchor to the new mapping's forward */
		pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS);	/* Point the anchor at us.  Now we're on the list (keep the flags) */
	}

	hw_add_map(mp, pmap->space, va);					/* Stick it on the PTEG hash list */

	(void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1);	/* Increment the resident page count */
	useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];	/* Point to slot to bump */
	useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
	(void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Increment the even or odd slot */

	for(i = 0; i < (pmapUsageMask + 1); i++) {			/* (TEST/DEBUG) */
		if((mpv->pmap->pmapUsage[i]) > 8192) {			/* (TEST/DEBUG) */
			panic("mapping_make: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
		}
	}

	if(pp && !locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* If we have one and we didn't hold on entry, unlock the physical entry */

	splx(s);											/* Ok for interruptions now */
	debugLog2(6, pmap->space, prot);					/* end mapping_make */
	return mpv;											/* Leave... */
}
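
/*
 *	Illustrative sketch only (not part of the build): entering a mapping with no physical entry,
 *	as described above for memory mapped devices.  The addresses and the WIMG attribute value
 *	passed here are hypothetical.
 */
#if 0	/* (TEST/DEBUG) example only */
static mapping *mapping_make_io_example(vm_offset_t dev_va, vm_offset_t dev_pa) {
	return mapping_make(kernel_pmap, 0, dev_va, dev_pa,	/* pp == 0, so nothing is locked or chained */
		VM_PROT_READ | VM_PROT_WRITE, 0x5, 0);			/* Assumed cache-inhibited/guarded WIMG nibble */
}
#endif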
/*
 *	Enters optimal translations for odd-sized V=F blocks.
 *
 *	Builds a block map for each power-of-two hunk o' address
 *	that exists.  This is specific to the processor type.
 *	PPC uses BAT register size stuff.  Future PPC might have
 *	something else.
 *
 *	The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
 *	stupid to know otherwise so we only look at the va anyhow, so there...
 */
void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) {	/* Maps optimal autogenned blocks */

	register blokmap	*blm, *oblm;
	unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
	int i, maxshft, nummax, minshft;

	kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, bnd, size, prot, attr);

	minsize = blokValid ^ (blokValid & (blokValid - 1));	/* Set minimum subblock size */
	maxsize = 0x80000000 >> cntlzw(blokValid);			/* Set maximum subblock size */

	minshft = 31 - cntlzw(minsize);						/* Shift to position minimum size */
	maxshft = 31 - cntlzw(blokValid);					/* Shift to position maximum size */

	leading = ((va + bnd - 1) & -bnd) - va;				/* Get size of leading area */
	trailing = size - leading;							/* Get size of trailing area */
	tomin = ((va + minsize - 1) & -minsize) - va;		/* Get size needed to round up to the minimum block size */

	kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin);	/* (TEST/DEBUG) */

	if(tomin) pmap_map_block(pmap, va, pa, tomin, prot, attr, 0);	/* Map up to minimum block size */

	va = va + tomin;									/* Adjust virtual start */
	pa = pa + tomin;									/* Adjust physical start */
	leading = leading - tomin;							/* Adjust leading size */

/*
 *	Some of this code is very classic PPC.  We need to fix this up.
 */

	leading = leading >> minshft;						/* Position for bit testing */
	cbsize = minsize;									/* Set the minimum size */

	for(i = 0; i < (maxshft - minshft + 1); i++) {		/* Cycle through all block sizes, small to large */

		if(leading & 1) {
			pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0);	/* Map up to next boundary */
			pa = pa + cbsize;							/* Bump up physical address */
			va = va + cbsize;							/* Bump up virtual address */
		}

		leading = leading >> 1;							/* Shift up to next size */
		cbsize = cbsize << 1;							/* Here too */
	}

	nummax = trailing >> maxshft;						/* Get number of max size blocks left */
	for(i=0; i < nummax - 1; i++) {						/* Account for all max size blocks left but 1 */
		pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0);	/* Map up to next boundary */

		pa = pa + maxsize;								/* Bump up physical address */
		va = va + maxsize;								/* Bump up virtual address */
		trailing -= maxsize;							/* Back off what we just did */
	}

	cbsize = maxsize;									/* Start at maximum size */

	for(i = 0; i < (maxshft - minshft + 1); i++) {		/* Cycle through all block sizes, high to low */

		if(trailing & cbsize) {
			trailing &= ~cbsize;						/* Remove the block we are allocating */
			pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0);	/* Map up to next boundary */
			pa = pa + cbsize;							/* Bump up physical address */
			va = va + cbsize;							/* Bump up virtual address */
		}

		cbsize = cbsize >> 1;							/* Next size down */
	}

	if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0);	/* Map up to end */

	return;												/* Return */
}
/*
 *	Enters translations for odd-sized V=F blocks.
 *
 *	Checks to ensure that the request is at least ODDBLKMIN in size.  If smaller, the request
 *	will be split into normal-sized page mappings.
 *
 *	The higher level VM map should be locked to ensure that we don't have a
 *	double diddle here.
 *
 *	We panic if we get a block that overlaps with another. We do not merge adjacent
 *	blocks because removing any address within a block removes the entire block, and it
 *	would really mess things up if we trashed too much.
 *
 *	Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
 *	not be changed.  The block must be unmapped and then remapped with the new stuff.
 *	We also do not keep track of reference or change flags.
 *
 *	Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
 *	with interruptions and translation disabled and under the control of the lock located
 *	in the first block map.  MRU is used because it is expected that the same entry
 *	will be accessed repeatedly while PTEs are being generated to cover those addresses.
 */
void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */

	register blokmap	*blm, *oblm, *oblm_virt;
	unsigned int pg;

	kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, size, prot, attr);

	if(size < ODDBLKMIN) {								/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {		/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
			kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n",	/* (TEST/DEBUG) */
				va + pg, pa + pg);
		}
		return;											/* All done */
	}

	blm = (blokmap *)mapping_alloc();					/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;			/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */
	blm->space = pmap->space;							/* Set the space (only needed for remove) */
	blm->blkFlags = flags;								/* Set the block's flags */

	kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
		blm, blm->start, blm->end, blm->PTEr);

	blm = (blokmap *)hw_cvp((mapping *)blm);			/* Get the physical address of this */

	kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);

	do {
		oblm = hw_add_blk(pmap, blm);
		if ((unsigned int)oblm & 2) {
			oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
			mapping_remove(pmap, oblm_virt->start);
		}
	} while ((unsigned int)oblm & 2);

	if (oblm) {
		oblm = (blokmap *)hw_cpv((mapping *) oblm);		/* Get the old block virtual address */
		blm = (blokmap *)hw_cpv((mapping *)blm);		/* Back to the virtual address of this */
		if((oblm->start != blm->start) ||				/* If we have a match, then this is a fault race and */
			(oblm->end != blm->end) ||					/* is acceptable */
			(oblm->PTEr != blm->PTEr))
			panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);	/* Otherwise, squeak loudly and carry a big stick */
		mapping_free((struct mapping *)blm);
	}

	kprintf("pmap_map_block: pmap->bmaps=%08X\n",		/* (TEST/DEBUG) */
		pmap->bmaps);

	return;												/* Return */
}
/*
 *	Optimally enters translations for odd-sized V=F blocks.
 *
 *	Checks to ensure that the request is at least ODDBLKMIN in size.  If smaller, the request
 *	will be split into normal-sized page mappings.
 *
 *	This one is different than pmap_map_block in that it will allocate its own virtual
 *	target address. Rather than allocating a single block,
 *	it will also allocate multiple blocks that are power-of-two aligned/sized.  This allows
 *	hardware-level mapping that takes advantage of BAT maps or large page sizes.
 *
 *	Most considerations for pmap_map_block apply.
 */
kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
		vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {	/* Map an optimal autogenned block */

	register blokmap	*blm, *oblm;
	unsigned int	pg;
	kern_return_t	err;
	vm_offset_t		bnd;

	kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		map, pa, size, prot, attr);

	if(size < ODDBLKMIN) {								/* Is this below the minimum size? */
		err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);	/* Make us some memories */
		if(err) {										/* Did we die? */
			kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);	/* Say we died */
			return(err);								/* Pass back the error */
		}
		kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va);	/* (TEST/DEBUG) */

		for(pg = 0; pg < size; pg += PAGE_SIZE) {		/* Add all pages in this block */
			mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		return(KERN_SUCCESS);							/* All done */
	}

	err = vm_map_block(map, va, &bnd, pa, size, prot);	/* Go get an optimal allocation */

	if(err == KERN_INVALID_ADDRESS) {					/* Can we try a brute force block mapping? */
		err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);	/* Make us some memories */
		if(err) {										/* Did we die? */
			kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err);	/* Say we died */
			return(err);								/* Pass back the error */
		}
		kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va);	/* (TEST/DEBUG) */

		pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0);	/* Set up a block mapped area */
		return KERN_SUCCESS;							/* All done now */
	}

	if(err != KERN_SUCCESS) {							/* We couldn't get any address range to map this... */
		kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);	/* Say we couldn't do it */
		return(err);									/* Pass back the error */
	}

	kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd);	/* (TEST/DEBUG) */

	mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr);	/* Go build the maps */
	return(KERN_SUCCESS);								/* All done */
}
/*
 *	Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
 *	areas.
 *
 *	Once blocks are merged, they act like one block, i.e., if you remove any part of it,
 *	it all goes.
 *
 *	This can only be used during boot.  Ain't no way we can handle SMP
 *	or preemption easily, so we restrict it.  We don't check either. We
 *	assume only skilled professional programmers will attempt using this
 *	function. We assume no responsibility, either real or imagined, for
 *	injury or death resulting from unauthorized use of this function.
 *
 *	No user serviceable parts inside. Notice to be removed by end-user only,
 *	under penalty of applicable federal and state laws.
 *
 *	See descriptions of pmap_map_block. Ignore the part where we say we panic for
 *	overlapping areas.  Note that we do panic if we can't merge.
 */
void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {	/* Map an autogenned block */

	register blokmap	*blm, *oblm;
	unsigned int pg;
	spl_t		s;

	kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, size, prot, attr);

	s=splhigh();										/* Don't bother from now on */
	if(size < ODDBLKMIN) {								/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {		/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		splx(s);										/* Ok for interruptions now */
		return;											/* All done */
	}

	blm = (blokmap *)mapping_alloc();					/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;			/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */

	kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
		blm, blm->start, blm->end, blm->PTEr);

	blm = (blokmap *)hw_cvp((mapping *)blm);			/* Get the physical address of this */

	kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);

	if(oblm = hw_add_blk(pmap, blm)) {					/* Add to list and make sure we don't overlap anything */
		panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm);	/* Squeak loudly and carry a big stick */
	}

	kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		pmap->bmaps);

	splx(s);											/* Ok for interruptions now */
	return;												/* Return */
}
/*
 *	void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 *
 *	Phys_entry is unlocked.
 */
void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) {	/* Change protection of all mappings to page */

	spl_t	spl;

	debugLog2(9, pp->pte1, prot);						/* start mapping_protect_phys */
	spl=splhigh();										/* No interruptions during this */

	if(!locked) {										/* Do we need to lock the physent? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
				pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
		}
	}

	hw_prot(pp, ppc_prot(prot));						/* Go set the protection on this physical page */

	if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	splx(spl);											/* Restore interrupt state */
	debugLog2(10, pp->pte1, 0);							/* end mapping_protect_phys */

	return;												/* Leave... */
}
/*
 *	void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
 *
 *	This routine takes a pmap and virtual address and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 */
void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) {	/* Change protection of a virtual page */

	mapping	*mp, *mpv;
	spl_t	s;

	debugLog2(9, vaddr, pmap);							/* start mapping_protect */
	s = splhigh();										/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, vaddr);			/* Lock the physical entry for this mapping */

	if(!mp) {											/* Did we find one? */
		splx(s);										/* Restore the interrupt level */
		debugLog2(10, 0, 0);							/* end mapping_protect */
		return;											/* Didn't find any... */
	}
	if((unsigned int)mp & 1) {							/* Did we timeout? */
		panic("mapping_protect: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);										/* Restore the interrupt level */
		return;											/* Bad hair day... */
	}

	hw_prot_virt(mp, ppc_prot(prot));					/* Go set the protection on this virtual mapping */

	mpv = hw_cpv(mp);									/* Get virtual address of mapping */
	if(mpv->physent) {									/* If there is a physical page, */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	}

	splx(s);											/* Restore interrupt state */
	debugLog2(10, mpv->PTEr, 0);						/* end mapping_protect */

	return;												/* Leave... */
}
/*
 *	mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - sets the default physical page attributes
 *
 *	This routine takes a physical entry and sets the physical attributes.  There can be no mappings
 *	associated with this page when we do it.
 */
void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {	/* Sets the default physical page attributes */

	debugLog2(11, pp->pte1, prot);						/* start mapping_phys_attr */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);				/* Complain about timeout */
	}

	hw_phys_attr(pp, ppc_prot(prot), wimg);				/* Go set the default WIMG and protection */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	debugLog2(12, pp->pte1, wimg);						/* end mapping_phys_attr */

	return;												/* Leave... */
}
/*
 *	void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and invalidates
 *	any PTEs it finds.
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_invall(struct phys_entry *pp) {			/* Clear all PTEs pointing to a physical page */

	hw_inv_all(pp);										/* Go invalidate all PTEs pointing to this physical page */

	return;												/* Leave... */
}
/*
 *	void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_clr_mod(struct phys_entry *pp) {			/* Clears the change bit of a physical page */

	hw_clr_mod(pp);										/* Go clear the change bit of a physical page */
	return;												/* Leave... */
}
/*
 *	void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_set_mod(struct phys_entry *pp) {			/* Sets the change bit of a physical page */

	hw_set_mod(pp);										/* Go set the change bit of a physical page */
	return;												/* Leave... */
}
/*
 *	void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled at entry.
 */

void mapping_clr_ref(struct phys_entry *pp) {			/* Clears the reference bit of a physical page */

	debugLog2(13, pp->pte1, 0);							/* start mapping_clr_ref */
	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry for this mapping */
		panic("Lock timeout getting lock on physical entry\n");	/* Just die... */
	}
	hw_clr_ref(pp);										/* Go clear the reference bit of a physical page */
	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock physical entry */
	debugLog2(14, pp->pte1, 0);							/* end mapping_clr_ref */
	return;												/* Leave... */
}
/*
 *	void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_set_ref(struct phys_entry *pp) {			/* Sets the reference bit of a physical page */

	hw_set_ref(pp);										/* Go set the reference bit of a physical page */
	return;												/* Leave... */
}
/*
 *	boolean_t mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the changed bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the changed bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

boolean_t mapping_tst_mod(struct phys_entry *pp) {		/* Tests the change bit of a physical page */

	return(hw_tst_mod(pp));								/* Go test the change bit of a physical page */
}
/*
 *	boolean_t mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

boolean_t mapping_tst_ref(struct phys_entry *pp) {		/* Tests the reference bit of a physical page */

	return(hw_tst_ref(pp));								/* Go test the reference bit of a physical page */
}
/*
 *	void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
 *
 *	Currently, this sets the default word 1 of the PTE.  The only bits set are the WIMG bits.
 */

void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) {	/* Initializes hw specific storage attributes */

	pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078);	/* Set the WIMG and phys addr in the default PTE1 */

	return;												/* Leave... */
}
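
/*
 *	Illustrative sketch only (not part of the build): a worked instance of the default PTE1
 *	formed above, with a hypothetical physical address and WIMG value.
 *
 *		pa   = 0x12345000, wimg = 0x5
 *		pte1 = (0x12345000 & -PAGE_SIZE) | ((0x5 << 3) & 0x00000078) = 0x12345028
 */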
/*
 *	mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
 *
 *	This routine frees any mapping blocks queued to mapCtl.mapcrel.  It also checks
 *	the number of free mappings remaining, and if below a threshold, replenishes them.
 *	The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
 *	a new one is allocated.
 *
 *	This routine allocates and/or frees memory and must be called from a safe place.
 *	Currently, vm_pageout_scan is the safest place. We ensure that the
 *	adjust is not entered recursively by using the mapCtl.mapcrecurse flag.
 */

thread_call_t				mapping_adjust_call;
static thread_call_data_t	mapping_adjust_call_data;
void mapping_adjust(void) {								/* Adjust free mappings */

	kern_return_t	retr;
	mappingblok	*mb, *mbn;
	spl_t		s;
	int			allocsize;
	extern int	vm_page_free_count;

	if(mapCtl.mapcmin <= MAPPERBLOK) {
		mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;

		kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
		kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
			mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
	}

	s = splhigh();										/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_adjust - timeout getting control lock (1)\n");	/* Tell all and die */
	}

	if (mapping_adjust_call == NULL) {
		thread_call_setup(&mapping_adjust_call_data,
			(thread_call_func_t)mapping_adjust,
			(thread_call_param_t)NULL);
		mapping_adjust_call = &mapping_adjust_call_data;
	}

	while(1) {											/* Keep going until we've got enough */

		allocsize = mapCtl.mapcmin - mapCtl.mapcfree;	/* Figure out how much we need */
		if(allocsize < 1) break;						/* Leave if we have all we need */

		if((unsigned int)(mbn = mapCtl.mapcrel)) {		/* Can we rescue a free one? */
			mapCtl.mapcrel = mbn->nextblok;				/* Dequeue it */
			mapCtl.mapcreln--;							/* Back off the count */
			allocsize = MAPPERBLOK;						/* Show we allocated one block */
		}
		else {											/* No free ones, try to get it */

			allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get the number of pages we need */

			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
			splx(s);									/* Restore 'rupts */

			for(; allocsize > 0; allocsize >>= 1) {		/* Try allocating in descending halves */
				retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize);	/* Find a virtual address to use */
				if((retr != KERN_SUCCESS) && (allocsize == 1)) {	/* Did we find any memory at all? */
					break;
				}
				if(retr == KERN_SUCCESS) break;			/* We got some memory, bail out... */
			}

			allocsize = allocsize * MAPPERBLOK;			/* Convert pages to number of maps allocated */
			s = splhigh();								/* Don't bother from now on */
			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
				panic("mapping_adjust - timeout getting control lock (2)\n");	/* Tell all and die */
			}

			if (retr != KERN_SUCCESS)
				break;									/* Failed to allocate, bail out... */
		}

		for(; allocsize > 0; allocsize -= MAPPERBLOK) {	/* Release one block at a time */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
			mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE);	/* Point to the next slot */
		}
		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}

	if(mapCtl.mapcholdoff) {							/* Should we hold off this release? */
		mapCtl.mapcrecurse = 0;							/* We are done now */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);										/* Restore 'rupts */
		return;											/* Return... */
	}

	mbn = mapCtl.mapcrel;								/* Get first pending release block */
	mapCtl.mapcrel = 0;									/* Dequeue them */
	mapCtl.mapcreln = 0;								/* Set count to 0 */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);											/* Restore 'rupts */

	while((unsigned int)mbn) {							/* Toss 'em all */
		mb = mbn->nextblok;								/* Get the next */
		kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);	/* Release this mapping block */
		mbn = mb;										/* Chain to the next */
	}

	__asm__ volatile("sync");							/* Make sure all is well */
	mapCtl.mapcrecurse = 0;								/* We are done now */
	return;
}
/*
 *	mapping_free(mapping *mp) - release a mapping to the free list
 *
 *	This routine takes a mapping and adds it to the free list.
 *	If this mapping makes the block non-empty, we queue it to the free block list.
 *	NOTE: we might want to queue it to the end to help quelch the pathological
 *	case when we get a mapping and free it repeatedly, causing the block to chain and unchain.
 *	If this release fills a block and we are above the threshold, we release the block
 *	to the release list instead.
 */
void mapping_free(struct mapping *mp) {					/* Release a mapping */

	mappingblok	*mb, *mbn;
	spl_t		s;
	unsigned int full, mindx;

	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;	/* Get index to mapping */
	mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);	/* Point to the mapping block */

	s = splhigh();										/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_free - timeout getting control lock\n");	/* Tell all and die */
	}

	full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]);	/* See if full now */
	mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));	/* Flip on the free bit */

	if(full) {											/* If it was full before this: */
		mb->nextblok = mapCtl.mapcnext;					/* Move head of list to us */
		mapCtl.mapcnext = mb;							/* Chain us to the head of the list */
		if(!((unsigned int)mapCtl.mapclast))
			mapCtl.mapclast = mb;
	}

	mapCtl.mapcfree++;									/* Bump free count */
	mapCtl.mapcinuse--;									/* Decrement in use count */

	mapCtl.mapcfreec++;									/* Count total calls */

	if(mapCtl.mapcfree > mapCtl.mapcmin) {				/* Should we consider releasing this? */
		if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
			== 0xFFFFFFFF) {							/* See if empty now */

			if(mapCtl.mapcnext == mb) {					/* Are we first on the list? */
				mapCtl.mapcnext = mb->nextblok;			/* Unchain us */
				if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* If last, remove last */
			}
			else {										/* We're not first */
				for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) {	/* Search for our block */
					if(mbn->nextblok == mb) break;		/* Is the next one ours? */
				}
				if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
				mbn->nextblok = mb->nextblok;			/* Dequeue us */
				if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn;	/* If last, make our predecessor last */
			}

			if(mb->mapblokflags & mbPerm) {				/* Is this permanently assigned? */
				mb->nextblok = mapCtl.mapcnext;			/* Move chain head to us */
				mapCtl.mapcnext = mb;					/* Chain us to the head */
				if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb;	/* If last, make us so */
			}
			else {
				mapCtl.mapcfree -= MAPPERBLOK;			/* Remove the block from the free count */
				mapCtl.mapcreln++;						/* Count on release list */
				mb->nextblok = mapCtl.mapcrel;			/* Move pointer */
				mapCtl.mapcrel = mb;					/* Chain us in front */
			}
		}
	}

	if(mapCtl.mapcreln > MAPFRTHRSH) {					/* Do we have way too many releasable mappings? */
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);		/* Go toss some */
		}
	}
	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);											/* Restore 'rupts */

	return;												/* Bye, dude... */
}
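
/*
 *	Illustrative sketch only (not part of the build): a worked instance of the free-bit index
 *	math above, for a hypothetical mapping at offset 0x160 within its block page.
 *
 *		mindx = (0x160 & (PAGE_SIZE - 1)) >> 5 = 0x0B	(each mapping slot is 32 bytes)
 *		word  = mindx >> 5 = 0							(mapblokfree[0])
 *		bit   = 0x80000000 >> (mindx & 31) = 0x00100000	(bit 11, counting from the MSB)
 */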
/*
 *	mapping_alloc(void) - obtain a mapping from the free list
 *
 *	This routine takes a mapping off of the free list and returns its address.
 *
 *	We do this by finding a free entry in the first block and allocating it.
 *	If this allocation empties the block, we remove it from the free list.
 *	If this allocation drops the total number of free entries below a threshold,
 *	we allocate a new block.
 */
mapping *mapping_alloc(void) {							/* Obtain a mapping */

	register mapping *mp;
	mappingblok	*mb, *mbn;
	spl_t		s;
	int			mindx;
	int			i;

	s = splhigh();										/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
	}

	if(!(mb = mapCtl.mapcnext)) {						/* Get the first block entry */

		struct mappingflush	mappingflush;
		PCA		*pca_min, *pca_max;
		PCA		*pca_base;

		pca_min = (PCA *)(hash_table_base + hash_table_size);
		pca_max = (PCA *)(hash_table_base + hash_table_size + hash_table_size);

		while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
			mapCtl.mapcflush.mappingcnt = 0;
			pca_base = mapCtl.mapcflush.pcaptr;
			do {
				hw_select_mappings(&mapCtl.mapcflush);
				mapCtl.mapcflush.pcaptr++;
				if (mapCtl.mapcflush.pcaptr >= pca_max)
					mapCtl.mapcflush.pcaptr = pca_min;
			} while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));

			if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
				hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
				panic("mapping_alloc - all mappings are wired\n");
			}
			mappingflush = mapCtl.mapcflush;
			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);

			for (i=0;i<mappingflush.mappingcnt;i++)
				mapping_remove(mappingflush.mapping[i].pmap,
					mappingflush.mapping[i].offset);

			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
				panic("mapping_alloc - timeout getting control lock\n");
			}
		}
		mb = mapCtl.mapcnext;
	}

	if(!(mindx = mapalc(mb))) {							/* Allocate a slot */
		panic("mapping_alloc - empty mapping block detected at %08X\n", mb);	/* Not allowed to find none */
	}

	if(mindx < 0) {										/* Did we just take the last one */
		mindx = -mindx;									/* Make positive */
		mapCtl.mapcnext = mb->nextblok;					/* Remove us from the list */
		if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* Removed the last one */
	}

	mapCtl.mapcfree--;									/* Decrement free count */
	mapCtl.mapcinuse++;									/* Bump in use count */

	mapCtl.mapcallocc++;								/* Count total calls */

/*
 *	Note: in the following code, we will attempt to rescue blocks only one at a time.
 *	Eventually, after a few more mapping_alloc calls, we will catch up.  If there are none
 *	rescueable, we will kick the misc scan who will allocate some for us.  We only do this
 *	if we haven't already done it.
 *	For early boot, we are set up to only rescue one block at a time.  This is because we prime
 *	the release list with as much as we need until threads start.
 */
	if(mapCtl.mapcfree < mapCtl.mapcmin) {				/* See if we need to replenish */
		if(mbn = mapCtl.mapcrel) {						/* Try to rescue a block from impending doom */
			mapCtl.mapcrel = mbn->nextblok;				/* Pop the queue */
			mapCtl.mapcreln--;							/* Back off the count */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
		}
		else {											/* We need to replenish */
			if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
				if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
					thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
				}
			}
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);											/* Restore 'rupts */

	mp = &((mapping *)mb)[mindx];						/* Point to the allocated mapping */
	__asm__ volatile("dcbz 0,%0" : : "r" (mp));			/* Clean it up */
	return mp;											/* Send it back... */
}
void
consider_mapping_adjust()
{
	spl_t	s;

	s = splhigh();										/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("consider_mapping_adjust - timeout getting control lock\n");	/* Tell all and die */
	}

	if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);		/* Go allocate some more */
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);											/* Restore 'rupts */

}
/*
 *	void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
 *
 *	The mapping block is a page size area on a page boundary.  It contains 1 header and 127
 *	mappings.  This call adds and initializes a block for use.
 *
 *	The header contains a chain link, bit maps, a virtual to real translation mask, and
 *	some statistics.  Bit maps map each slot on the page (bit 0 is not used because it
 *	corresponds to the header).  The translation mask is the XOR of the virtual and real
 *	addresses (needless to say, the block must be wired).
 *
 *	We handle these mappings the same way as saveareas: the block is only on the chain so
 *	long as there are free entries in it.
 *
 *	Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
 *	mappings. Blocks marked PERM won't ever be released.
 *
 *	If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
 *	list.  We do this only at start up time. This is done because we only allocate blocks
 *	in the pageout scan and it doesn't start up until after we run out of the initial mappings.
 *	Therefore, we need to preallocate a bunch, but we don't want them to be permanent.  If we put
 *	them on the release queue, the allocate routine will rescue them.  Then when the
 *	pageout scan starts, all extra ones will be released.
 */
1388 void mapping_free_init(vm_offset_t mbl
, int perm
, boolean_t locked
) {
1389 /* Set's start and end of a block of mappings
1390 perm indicates if the block can be released
1391 or goes straight to the release queue .
1392 locked indicates if the lock is held already */
1399 mb
= (mappingblok
*)mbl
; /* Start of area */
1402 if(perm
>= 0) { /* See if we need to initialize the block */
1404 raddr
= (unsigned int)mbl
; /* Perm means V=R */
1405 mb
->mapblokflags
= mbPerm
; /* Set perm */
1408 raddr
= kvtophys(mbl
); /* Get real address */
1409 mb
->mapblokflags
= 0; /* Set not perm */
1412 mb
->mapblokvrswap
= raddr
^ (unsigned int)mbl
; /* Form translation mask */
1414 mb
->mapblokfree
[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1415 mb
->mapblokfree
[1] = 0xFFFFFFFF; /* Set next 32 free */
1416 mb
->mapblokfree
[2] = 0xFFFFFFFF; /* Set next 32 free */
1417 mb
->mapblokfree
[3] = 0xFFFFFFFF; /* Set next 32 free */
	s = splhigh();						/* Don't bother from now on */
	if(!locked) {						/* Do we need the lock? */
		if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
			panic("mapping_free_init - timeout getting control lock\n");	/* Tell all and die */
		}
	}

	if(perm < 0) {						/* Direct to release queue? */
		mb->nextblok = mapCtl.mapcrel;			/* Move forward pointer */
		mapCtl.mapcrel = mb;				/* Queue us on in */
		mapCtl.mapcreln++;				/* Count the free block */
	}
	else {							/* Add to the free list */

		mb->nextblok = 0;				/* We always add to the end */
		mapCtl.mapcfree += MAPPERBLOK;			/* Bump count */

		if(!((unsigned int)mapCtl.mapcnext)) {		/* First entry on list? */
			mapCtl.mapcnext = mapCtl.mapclast = mb;	/* Chain to us */
		}
		else {						/* We are not the first */
			mapCtl.mapclast->nextblok = mb;		/* Point the last to us */
			mapCtl.mapclast = mb;			/* We are now last */
		}
	}

	if(!locked) {						/* Do we need to unlock? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	}
	splx(s);						/* Restore 'rupts */
	return;							/* All done, leave... */
}
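/*
 *		Illustrative sketch (not part of the original source): how the translation mask and the
 *		free bit maps set up above are meant to be read.  Because mapblokvrswap is the XOR of the
 *		block's virtual and real addresses, XORing an address within the block with it converts in
 *		either direction.  The bit maps use PowerPC bit numbering (bit 0 is the most significant
 *		bit), and slot 0 is the header, so it is permanently marked in use.  Helper names are
 *		hypothetical.
 */
static unsigned int mapblok_xlate_addr(mappingblok *mb, unsigned int addr) {	/* Virtual <-> real for addresses inside this block */

	return addr ^ mb->mapblokvrswap;			/* The XOR mask works in both directions */
}

static boolean_t mapblok_slot_is_free(mappingblok *mb, int slot) {	/* Slot 0 (the header) always reads busy */

	return (mb->mapblokfree[slot >> 5] >> (31 - (slot & 31))) & 1;	/* Bit set means the slot is free */
}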
/*
 *		void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
 *
 *		No locks can be held, because we allocate memory here.
 *		This routine needs a corresponding mapping_relpre call to remove the
 *		hold-off flag so that the adjust routine will free the extra mapping
 *		blocks on the release list.  I don't like this, but I don't know
 *		how else to do it for now...
 *
 */
void mapping_prealloc(unsigned int size) {			/* Preallocates mappings for a large request */

	int		nmapb, i;
	kern_return_t	retr;
	mappingblok	*mbn;
	spl_t		s;

	s = splhigh();						/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_prealloc - timeout getting control lock\n");	/* Tell all and die */
	}

	nmapb = (size >> 12) + mapCtl.mapcmin;			/* Get number of entries needed for this and the minimum */

	mapCtl.mapcholdoff++;					/* Bump the hold-off count */

	if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) {		/* Do we already have enough? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);					/* Restore 'rupts */
		return;
	}

	if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);					/* Restore 'rupts */
		return;
	}

	nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;		/* Get number of blocks to get */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);						/* Restore 'rupts */

	for(i = 0; i < nmapb; i++) {				/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {			/* Did we get some memory? */
			panic("Whoops...  Not a bit of wired memory left for anyone\n");
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto the release queue */
	}

	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));

	mapCtl.mapcrecurse = 0;					/* We are done now */
}
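/*
 *		Illustrative sketch (not part of the original source): the block-count arithmetic above,
 *		restated as a helper.  For example, a 1MB request contributes size >> 12 == 256 mappings;
 *		assuming mapcmin were 128 and 200 mappings were already free, 184 more would be needed,
 *		i.e. (184 + 127 - 1) / 127 == 2 page-sized blocks (MAPPERBLOK being 127, since a block
 *		holds a header plus 127 mappings).  The helper name is hypothetical.
 */
static int mapping_blocks_needed(unsigned int size, unsigned int already_free) {

	int nmapb;

	nmapb = (size >> 12) + mapCtl.mapcmin;			/* Mappings for this request plus the floor */
	nmapb = nmapb - already_free;				/* Less what is already on hand */
	if(nmapb <= 0) return 0;				/* Nothing extra needed */
	return (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;		/* Round up to whole blocks */
}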
/*
 *		void mapping_relpre(void) - Releases the preallocation hold-off
 *
 *		This routine removes the hold-off flag so that the adjust routine will free the
 *		extra mapping blocks on the release list.  I don't like this, but I don't know
 *		how else to do it for now...
 *
 */
void mapping_relpre(void) {					/* Releases the preallocation hold-off */

	spl_t	s;

	s = splhigh();						/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_relpre - timeout getting control lock\n");	/* Tell all and die */
	}
	if(--mapCtl.mapcholdoff < 0) {				/* Back down the hold-off count */
		panic("mapping_relpre: hold-off count went negative\n");
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);						/* Restore 'rupts */
}
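/*
 *		Illustrative sketch (not part of the original source): the intended pairing of
 *		mapping_prealloc and mapping_relpre around a large block mapping.  The helper name and
 *		parameter values are hypothetical; pmap_map_block is called the same way bmapmap calls it
 *		further down.
 */
static void map_large_region_sketch(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot) {

	mapping_prealloc(size);					/* Stock up on mappings and set the hold-off first */
	pmap_map_block(pmap, va, pa, size, prot, 0, 0);		/* Make the actual mappings */
	mapping_relpre();					/* Drop the hold-off so extra blocks can be released again */
}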
/*
 *		void mapping_free_prime(void) - Primes the mapping block release list
 *
 *		See mapping_free_init.
 *		No locks can be held, because we allocate memory here.
 *		One processor running only.
 *
 */
void mapping_free_prime(void) {					/* Primes the mapping block release list */

	int		nmapb, i;
	kern_return_t	retr;
	mappingblok	*mbn;
	vm_offset_t	mapping_min;

	retr = kmem_suballoc(kernel_map, &mapping_min, mem_size / 16,
			     FALSE, TRUE, &mapping_map);

	if (retr != KERN_SUCCESS)
		panic("mapping_free_prime: kmem_suballoc failed");

	nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get permanent allocation */
	nmapb = nmapb * 4;					/* Get 4 times our initial allocation */

	kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
		mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);

	for(i = 0; i < nmapb; i++) {				/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {			/* Did we get some memory? */
			panic("Whoops...  Not a bit of wired memory left for anyone\n");
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto the release queue */
	}

	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
}
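/*
 *		Illustrative sketch (not part of the original source): the priming arithmetic above,
 *		restated.  For example, if the boot-time pool held 500 mappings (free plus in use), that
 *		rounds up to 4 blocks of MAPPERBLOK (127) mappings, so 4 * 4 == 16 page-sized blocks get
 *		queued on the release list.  The helper name is hypothetical.
 */
static int mapping_prime_block_count(unsigned int pool_mappings) {

	int nmapb;

	nmapb = (pool_mappings + MAPPERBLOK - 1) / MAPPERBLOK;	/* Blocks covering the current pool */
	return nmapb * 4;					/* Prime four times that many */
}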
void
mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		       vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = mapCtl.mapcinuse;
	*cur_size   = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
	*max_size   = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
	*elem_size  = (PAGE_SIZE / (MAPPERBLOK + 1));
	*alloc_size = PAGE_SIZE;

	*collectable = 1;
	*exhaustable = 0;
}
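/*
 *		Illustrative sketch (not part of the original source): the per-element figure reported
 *		above.  A page holds 1 header plus MAPPERBLOK mappings, so, assuming 4K pages and
 *		MAPPERBLOK of 127, the reported element size works out to 4096 / 128 == 32 bytes.
 *		The helper name is hypothetical.
 */
static vm_size_t mapping_zone_elem_size(void) {

	return PAGE_SIZE / (MAPPERBLOK + 1);			/* 32 bytes with 4K pages and 127 mappings per block */
}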
/*
 *		vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds the first virtual mapping of a physical page in a space
 *
 *		Gets a lock on the physical entry, then searches the list of attached mappings for one with
 *		the same space.  If it finds it, it returns the virtual address.
 *
 *		Note that this will fail if the pmap has nested pmaps in it.  Fact is, I'll check
 *		for it and fail it myself...
 *
 */
vm_offset_t	mapping_p2v(pmap_t pmap, struct phys_entry *pp) {	/* Finds first virtual mapping of a physical page in a space */

	spl_t			s;
	register mapping	*mp, *mpv;
	vm_offset_t		va;

	if(pmap->vflags & pmapAltSeg) return 0;			/* If there are nested pmaps, fail immediately */

	s = splhigh();						/* Keep interruptions out of this */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Try to get the lock on the physical entry */
		splx(s);					/* Restore 'rupts */
		panic("mapping_p2v: timeout getting lock on physent\n");	/* Arrrgghhhh! */
		return(0);					/* Should die before here */
	}

	va = 0;							/* Assume failure */

	for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) {	/* Scan 'em all */

		if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue;	/* Skip all the rest if this is not the right space... */

		va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		va = va | ((mpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		va = va | ((mpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass the virtual address back */
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	splx(s);						/* Restore 'rupts */
	return(va);						/* Return the result or 0... */
}
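/*
 *		Illustrative sketch (not part of the original source): the backward-hash arithmetic used
 *		above, pulled out with the three pieces labelled.  The PowerPC primary hash is the VSID
 *		XORed with the page index, so re-XORing the PTE's hash-group position with the space gives
 *		back the wrapped low page-index bits; the VSID low bits supply the segment and the API
 *		field the remaining high bits.  The helper name is hypothetical.
 */
static vm_offset_t mapping_pte_to_va(mapping *mpv, space_t space) {

	vm_offset_t va;

	va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (space << 12)) & 0x003FF000;	/* Un-hash the wrapped page-index bits */
	va = va | ((mpv->PTEv << 1) & 0xF0000000);		/* Segment number from the low bits of the VSID */
	va = va | ((mpv->PTEv << 22) & 0x0FC00000);		/* Abbreviated page index (API) bits */
	return va;
}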
/*
 *		kvtophys(addr)
 *
 *		Convert a kernel virtual address to a physical address
 */
vm_offset_t kvtophys(vm_offset_t va) {

	register mapping	*mp, *mpv;
	register blokmap	*bmp;
	register vm_offset_t	pa;
	spl_t			s;

	s=splhigh();						/* Don't bother from now on */
	mp = hw_lock_phys_vir(PPC_SID_KERNEL, va);		/* Find mapping and lock the physical entry for this mapping */

	if((unsigned int)mp&1) {				/* Did the lock on the phys entry time out? */
		splx(s);					/* Restore 'rupts */
		panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va);	/* Scream bloody murder! */
	}

	if(!mp) {						/* If it was not a normal page */
		pa = hw_cvp_blk(kernel_pmap, va);		/* Try to convert odd-sized page (returns 0 if not found) */
		splx(s);					/* Restore 'rupts */
		return pa;					/* Return physical address */
	}

	mpv = hw_cpv(mp);					/* Convert to virtual addressing */

	if(!mpv->physent) {					/* Was there a physical entry? */
		pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* No, get physical address from the mapping's PTEr */
	}
	else {
		pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* Get physical address from the physent */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	}

	splx(s);						/* Restore 'rupts */
	return pa;						/* Return the physical address... */
}
/*
 *		phystokv(addr)
 *
 *		Convert a physical address to a kernel virtual address if
 *		there is a mapping; otherwise return NULL
 */
vm_offset_t phystokv(vm_offset_t pa) {

	struct phys_entry	*pp;
	vm_offset_t		va;

	pp = pmap_find_physentry(pa);				/* Find the physical entry */
	if (PHYS_NULL == pp) {
		return (vm_offset_t)NULL;			/* If none, return null */
	}
	if(!(va=mapping_p2v(kernel_pmap, pp))) {
		return 0;					/* Can't find it, return 0... */
	}

	return (va | (pa & (PAGE_SIZE-1)));			/* Build and return the VADDR... */
}
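/*
 *		Illustrative sketch (not part of the original source): a round trip through the two
 *		converters above.  For a normally mapped kernel page, phystokv(kvtophys(va)) should come
 *		back with the same page offset; either converter returns 0 when it cannot find a mapping.
 *		The helper name is hypothetical.
 */
static boolean_t kv_phys_round_trip_ok(vm_offset_t va) {

	vm_offset_t pa, va2;

	pa = kvtophys(va);					/* Virtual to physical */
	if(!pa) return FALSE;					/* Not a mapped page */
	va2 = phystokv(pa);					/* Physical back to some kernel virtual */
	return (va2 != 0) && ((va2 & (PAGE_SIZE - 1)) == (va & (PAGE_SIZE - 1)));	/* Page offsets must agree */
}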
/*
 *		void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
 *		page 0 access for the current thread.
 *
 *		If the parameter is TRUE, faults are ignored.
 *		If the parameter is FALSE, faults are honored.
 *
 */
void ignore_zero_fault(boolean_t type) {			/* Sets up to ignore or honor any fault on page 0 access for the current thread */

	if(type) current_act()->mact.specFlags |= ignoreZeroFault;	/* Ignore faults on page 0 */
	else     current_act()->mact.specFlags &= ~ignoreZeroFault;	/* Honor faults on page 0 */

	return;							/* All done... */
}
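/*
 *		Illustrative sketch (not part of the original source): the intended bracketing use of
 *		ignore_zero_fault around an access that may legitimately touch page 0.  What a tolerated
 *		access actually returns is up to the low-level fault path; this only shows the on/off
 *		bracketing.  The helper name and the probe itself are hypothetical.
 */
static unsigned int probe_low_memory_word(volatile unsigned int *addr) {

	unsigned int val;

	ignore_zero_fault(TRUE);				/* Tolerate a fault on page 0 for this thread */
	val = *addr;						/* Hypothetical low-memory probe */
	ignore_zero_fault(FALSE);				/* Back to normal fault handling */
	return val;
}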
/*
 *		Allocates a range of virtual addresses in a map as optimally as
 *		possible for block mapping.  The start address is aligned such
 *		that a minimum number of power-of-two sized/aligned blocks is
 *		required to cover the entire range.
 *
 *		We also use a mask of valid block sizes to determine optimality.
 *
 *		Note that the passed-in pa is not actually mapped to the selected va;
 *		rather, it is used to figure the optimal boundary.  The actual
 *		V-to-R mapping is done externally.
 *
 *		This function will return KERN_INVALID_ADDRESS if an optimal address
 *		cannot be found.  That is not necessarily a fatal error; the caller may still
 *		be able to do a non-optimal assignment.  (An illustrative sketch of the
 *		boundary selection follows the function.)
 */
kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
	vm_size_t size, vm_prot_t prot) {

	vm_map_entry_t	entry, next, tmp_entry, new_entry;
	vm_offset_t	start, end, algnpa, endadr, strtadr, curradr;
	vm_offset_t	boundary;

	unsigned int	maxsize, minsize, leading, trailing;

	assert(page_aligned(pa));
	assert(page_aligned(size));

	if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT);	/* Dude, like we need a target map */

	minsize = blokValid ^ (blokValid & (blokValid - 1));	/* Set minimum subblock size */
	maxsize = 0x80000000 >> cntlzw(blokValid);		/* Set maximum subblock size */

	boundary = 0x80000000 >> cntlzw(size);			/* Get optimal boundary */
	if(boundary > maxsize) boundary = maxsize;		/* Pin this at maximum supported hardware size */

	vm_map_lock(map);					/* No touchee no mapee */

	for(; boundary > minsize; boundary >>= 1) {		/* Try all optimizations until we find one */
		if(!(boundary & blokValid)) continue;		/* Skip unavailable block sizes */
		algnpa = (pa + boundary - 1) & -boundary;	/* Round physical up */
		leading = algnpa - pa;				/* Get leading size */

		curradr = 0;					/* Start low */

		while(1) {					/* Try all possible values for this opt level */

			curradr = curradr + boundary;		/* Get the next optimal address */
			strtadr = curradr - leading;		/* Calculate start of optimal range */
			endadr = strtadr + size;		/* And now the end */

			if((curradr < boundary) ||		/* Did address wrap here? */
			   (strtadr > curradr) ||		/* How about this way? */
			   (endadr < strtadr)) break;		/* We wrapped, try next lower optimization... */

			if(strtadr < map->min_offset) continue;	/* Jump to the next higher slot... */
			if(endadr > map->max_offset) break;	/* No room right now... */

			if(vm_map_lookup_entry(map, strtadr, &entry)) continue;	/* Find slot, continue if allocated... */

			next = entry->vme_next;			/* Get the next entry */
			if((next == vm_map_to_entry(map)) ||	/* Are we the last entry? */
			   (next->vme_start >= endadr)) {	/* or do we end before the next entry? */

				new_entry = vm_map_entry_insert(map, entry, strtadr, endadr,	/* Yes, carve out our entry */
					VM_OBJECT_NULL,
					0,			/* Offset into object of 0 */
					FALSE,			/* No copy needed */
					FALSE,			/* Not shared */
					FALSE,			/* Not in transition */
					prot,			/* Set the protection to requested */
					prot,			/* We can't change protection */
					VM_BEHAVIOR_DEFAULT,	/* Use default behavior, but makes no never mind,
								   'cause we don't page in this area */
					VM_INHERIT_DEFAULT,	/* Default inheritance */
					0);			/* Nothing is wired */

				vm_map_unlock(map);		/* Let the world see it all */
				*va = strtadr;			/* Tell everyone */
				*bnd = boundary;		/* Say what boundary we are aligned to */
				return(KERN_SUCCESS);		/* Leave, all is right with the world... */
			}
		}
	}

	vm_map_unlock(map);					/* Couldn't find a slot */
	return(KERN_INVALID_ADDRESS);
}
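/*
 *		Illustrative sketch (not part of the original source): the boundary selection described
 *		in the comment above vm_map_block, spelled out.  The smallest and largest candidate block
 *		sizes are the lowest and highest set bits of blokValid, and the starting boundary is the
 *		largest power of two not exceeding the request (capped at the hardware maximum).  For a
 *		192K request (0x30000 bytes), the first boundary tried would be 128K.  The helper name is
 *		hypothetical.
 */
static vm_offset_t initial_block_boundary(vm_size_t size) {

	vm_offset_t maxsize, boundary;

	maxsize = 0x80000000 >> cntlzw(blokValid);		/* Largest supported block size (highest bit of blokValid) */
	boundary = 0x80000000 >> cntlzw(size);			/* Largest power of two not exceeding the request */
	if(boundary > maxsize) boundary = maxsize;		/* Never exceed what the hardware supports */
	return boundary;
}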
/*
 *		Copies data from a physical page to a virtual page.  This is used to
 *		move data from the kernel to user state.
 *
 *		Note that it is invalid to have a source that spans a page boundary.
 *		We don't check protection either.
 *		And we don't handle a block-mapped sink address either.
 *
 */
kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {

	vm_map_t		map;
	kern_return_t		ret;
	unsigned int		spaceid;
	unsigned int		left, csize;
	vm_offset_t		pa;
	register mapping	*mpv, *mp;
	spl_t			s;

	if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE;	/* We don't allow a source page crosser */
	map = current_act()->map;				/* Get the current map */

	while(size) {						/* Move it all, one sink page at a time */
		s=splhigh();					/* Don't bother me */

		spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28];	/* Get space ID. Don't bother to clean top bits */

		mp = hw_lock_phys_vir(spaceid, sink);		/* Lock the physical entry for the sink */
		if(!mp) {					/* Was it there? */
			splx(s);				/* Restore the interrupt level */
			ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Didn't find it, try to fault it in... */
			if (ret == KERN_SUCCESS) continue;	/* We got it in, try again to find it... */

			return KERN_FAILURE;			/* Didn't find any, return no good... */
		}
		if((unsigned int)mp&1) {			/* Did we timeout? */
			panic("copyp2v: timeout locking physical entry for virtual address (%08X)\n", sink);	/* Yeah, scream about it! */
			splx(s);				/* Restore the interrupt level */
			return KERN_FAILURE;			/* Bad hair day, return FALSE... */
		}

		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */

		if(mpv->PTEr & 1) {				/* Are we write protected? yes, could indicate COW */
			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the sink */
			splx(s);				/* Restore the interrupt level */
			ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Check for a COW area */
			if (ret == KERN_SUCCESS) continue;	/* We got it in, try again to find it... */

			return KERN_FAILURE;			/* Didn't find any, return no good... */
		}
		left = PAGE_SIZE - (sink & PAGE_MASK);		/* Get amount left on sink page */

		csize = size < left ? size : left;		/* Set amount to copy this pass */

		pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK));	/* Get physical address of sink */

		bcopy_physvir((char *)source, (char *)pa, csize);	/* Do a physical copy, virtually */

		hw_set_mod(mpv->physent);			/* Go set the change of the sink */

		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the sink */
		splx(s);					/* Open up for interrupts */

		sink += csize;					/* Move up to start of next page */
		source += csize;				/* Move up source */
		size -= csize;					/* Set amount for next pass */
	}
	return KERN_SUCCESS;
}
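/*
 *		Illustrative sketch (not part of the original source): how a caller might push a wired,
 *		normally mapped kernel buffer out to a user address with copyp2v, one source page at a
 *		time, since copyp2v refuses sources that cross a page boundary.  The helper name is
 *		hypothetical.
 */
static kern_return_t copy_kernel_buffer_to_user(vm_offset_t kbuf, vm_offset_t uaddr, unsigned int len) {

	unsigned int chunk;
	kern_return_t ret;

	while(len) {						/* Feed copyp2v one source page at a time */
		chunk = PAGE_SIZE - (kbuf & PAGE_MASK);		/* Stay within the current source page */
		if(chunk > len) chunk = len;
		ret = copyp2v(kvtophys(kbuf), uaddr, chunk);	/* copyp2v wants a physical source address */
		if(ret != KERN_SUCCESS) return ret;
		kbuf += chunk; uaddr += chunk; len -= chunk;
	}
	return KERN_SUCCESS;
}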
/*
 *		Dumps out the mapping stuff associated with a virtual address
 */
void dumpaddr(space_t space, vm_offset_t va) {

	mapping		*mp, *mpv;
	spl_t		s;

	s=splhigh();						/* Don't bother me */

	mp = hw_lock_phys_vir(space, va);			/* Lock the physical entry for this mapping */
	if(!mp) {						/* Did we find one? */
		splx(s);					/* Restore the interrupt level */
		printf("dumpaddr: virtual address (%08X) not mapped\n", va);
		return;						/* Didn't find any, return FALSE... */
	}
	if((unsigned int)mp&1) {				/* Did we timeout? */
		panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		splx(s);					/* Restore the interrupt level */
		return;						/* Bad hair day, return FALSE... */
	}
	printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va);	/* Say what address we're dumping */
	mpv = hw_cpv(mp);					/* Get virtual address of mapping */
	dumpmapping(mpv);					/* Dump the mapping and its PCA */
	if(mpv->physent) dumpphys(mpv->physent);		/* Dump the physical entry chain if there is one */

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock physical entry associated with mapping */

	splx(s);						/* Was there something you needed? */
	return;							/* Tell them we did it */
}
/*
 *		Prints out a mapping control block
 *
 */
void dumpmapping(struct mapping *mp) {				/* Dump out a mapping */

	printf("Dump of mapping block: %08X\n", mp);		/* Header */
	printf("                 next: %08X\n", mp->next);
	printf("             hashnext: %08X\n", mp->hashnext);
	printf("              PTEhash: %08X\n", mp->PTEhash);
	printf("               PTEent: %08X\n", mp->PTEent);
	printf("              physent: %08X\n", mp->physent);
	printf("                 PTEv: %08X\n", mp->PTEv);
	printf("                 PTEr: %08X\n", mp->PTEr);
	printf("                 pmap: %08X\n", mp->pmap);

	if(mp->physent) {					/* Print physent if it exists */
		printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
	}
	else {
		printf("Associated physical entry: none\n");
	}

	dumppca(mp);						/* Dump out the PCA information */

	return;
}
/*
 *		Prints out a PTEG control area
 *
 */
void dumppca(struct mapping *mp) {				/* Dump out the PCA for a mapping */

	PCA		*pca;
	unsigned int	*pteg;

	pca = (PCA *)((unsigned int)mp->PTEhash&-64);		/* Back up to the start of the PCA */
	pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
	printf(" Dump of PCA: %08X\n", pca);			/* Header */
	printf("     PCAlock: %08X\n", pca->PCAlock);
	printf("     PCAallo: %08X\n", pca->flgs.PCAallo);
	printf("     PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);
	printf("              %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);
	printf("Dump of PTEG: %08X\n", pteg);			/* Header */
	printf("              %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);
	printf("              %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);
	printf("              %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);
	printf("              %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);

	return;
}
/*
 *		Dumps starting with a physical entry
 *
 */
void dumpphys(struct phys_entry *pp) {				/* Dump from physent */

	mapping		*mp;

	printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
	mp = hw_cpv(pp->phys_link);				/* Get the first mapping on the chain */
	while(mp) {						/* Walk every mapping attached to this page */
		dumpmapping(mp);
		dumppca(mp);
		mp = hw_cpv(mp->next);
	}

	return;
}
kern_return_t bmapvideo(vm_offset_t *info);
kern_return_t bmapvideo(vm_offset_t *info) {

	extern struct vc_info vinfo;

	(void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));	/* Copy out the video info */
	return KERN_SUCCESS;
}
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {

	pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0);	/* Map it in */
	return KERN_SUCCESS;
}
kern_return_t bmapmapr(vm_offset_t va);
kern_return_t bmapmapr(vm_offset_t va) {

	mapping_remove(current_act()->task->map->pmap, va);	/* Remove map */
	return KERN_SUCCESS;
}